1 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/elf.h linux-2.6.32.41/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
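
The hunk above supplies the PaX ASLR parameters for Alpha: the base address used for ET_DYN binaries and the number of random bits applied to the mmap and stack deltas (28 and 19 bits for the 64-bit personality, 14 for ADDR_LIMIT_32BIT). A rough standalone sketch of what those bit counts mean in address-space terms, assuming the deltas are applied in page-sized units as PaX does elsewhere in the patch (Alpha uses 8 KiB pages, PAGE_SHIFT == 13); the helper name below is illustrative, not part of the patch.

#include <stdio.h>

/* Illustrative only: span covered by an n-bit, page-granular random delta. */
static unsigned long long delta_span(unsigned int bits, unsigned int page_shift)
{
	return (1ULL << bits) << page_shift;
}

int main(void)
{
	/* Bit counts from the hunk above, 64-bit Alpha personality. */
	printf("mmap  delta: 28 bits -> %llu GiB of possible placements\n",
	       delta_span(28, 13) >> 30);
	printf("stack delta: 19 bits -> %llu GiB of possible placements\n",
	       delta_span(19, 13) >> 30);
	return 0;
}
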
18 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/pgtable.h linux-2.6.32.41/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.41/arch/alpha/kernel/module.c linux-2.6.32.41/arch/alpha/kernel/module.c
40 --- linux-2.6.32.41/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.41/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.41/arch/alpha/kernel/osf_sys.c linux-2.6.32.41/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
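
The arch_get_unmapped_area hunks above (and the matching ones for ARM, FR-V, IA-64 and MIPS later in the patch) replace the open-coded test "!vma || addr + len <= vma->vm_start" with check_heap_stack_gap(), and start the search at the per-process mm->mmap_base (which carries the RANDMMAP offset when enabled) instead of TASK_UNMAPPED_BASE. The helper itself is added by the mm/ portion of the patch, outside this excerpt; the sketch below is only a userspace model of the idea, assuming the extra condition is a guard gap kept below a downward-growing stack VMA. The struct, field and constant names are simplified stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct vma_model {
	unsigned long vm_start;
	bool grows_down;              /* models VM_GROWSDOWN */
};

#define GUARD_GAP (256UL << 12)       /* assumed guard size, one choice of many */

/* Model of the idea: the old test only required the new mapping to end at or
 * before the next VMA; the gap-aware test also keeps GUARD_GAP free below a
 * stack-like VMA so the heap cannot creep up against the stack. */
static bool gap_ok(const struct vma_model *vma, unsigned long addr,
		   unsigned long len)
{
	if (!vma)
		return true;
	if (addr + len > vma->vm_start)
		return false;
	if (vma->grows_down)
		return vma->vm_start - (addr + len) >= GUARD_GAP;
	return true;
}

int main(void)
{
	struct vma_model stack = { .vm_start = 0xbf000000UL, .grows_down = true };

	printf("%d\n", gap_ok(&stack, 0xbefff000UL, 0x1000));  /* 0: no gap left   */
	printf("%d\n", gap_ok(&stack, 0xbee00000UL, 0x1000));  /* 1: gap preserved */
	return 0;
}
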
137 diff -urNp linux-2.6.32.41/arch/alpha/mm/fault.c linux-2.6.32.41/arch/alpha/mm/fault.c
138 --- linux-2.6.32.41/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.41/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
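
The EMUPLT code above decodes the displacement fields of the ldah/lda/ldq instruction words with the expression ((insn | 0xFFFFFFFFFFFF0000UL) ^ 0x8000UL) + 0x8000UL, a branch-free way of sign-extending the low 16 bits of the word while discarding the opcode bits. The identity can be checked in isolation; nothing Alpha-specific is needed:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The expression used in the hunk above: keep the low 16 bits of the
 * instruction word and sign-extend them to 64 bits without a conditional. */
static uint64_t sext16(uint32_t insn)
{
	uint64_t v = insn | 0xFFFFFFFFFFFF0000ULL;
	return (v ^ 0x8000ULL) + 0x8000ULL;
}

int main(void)
{
	for (uint32_t d = 0; d < 0x10000; d++) {
		/* Reference result, written out explicitly. */
		uint64_t want = (d & 0x8000) ? (0xFFFFFFFFFFFF0000ULL | d)
					     : (uint64_t)d;

		assert(sext16(d) == want);
		/* Opcode bits (e.g. the 0xA77B0000 ldq pattern tested above)
		 * do not affect the result. */
		assert(sext16(0xA77B0000U | d) == want);
	}
	puts("16-bit sign-extension identity holds");
	return 0;
}
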
296 diff -urNp linux-2.6.32.41/arch/arm/include/asm/elf.h linux-2.6.32.41/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.41/arch/arm/include/asm/kmap_types.h linux-2.6.32.41/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.41/arch/arm/include/asm/uaccess.h linux-2.6.32.41/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
329 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
330
331 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
332 {
333 + if ((long)n < 0)
334 + return n;
335 +
336 if (access_ok(VERIFY_READ, from, n))
337 n = __copy_from_user(to, from, n);
338 else /* security hole - plug it */
339 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
340
341 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
342 {
343 + if ((long)n < 0)
344 + return n;
345 +
346 if (access_ok(VERIFY_WRITE, to, n))
347 n = __copy_to_user(to, from, n);
348 return n;
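
The two hunks above add an early "if ((long)n < 0) return n;" to the ARM copy_from_user/copy_to_user wrappers; the same pattern appears in the m32r and ia64 copy helpers later in the patch. The idea is to reject length values with the sign bit set, which in practice are negative signed counts that were converted to an unsigned size before reaching the copy routine. A minimal userspace illustration of the case being screened out:

#include <stdio.h>
#include <stddef.h>

/* Model of the added check: a size whose top bit is set is, in practice, a
 * negative signed count that was converted to an unsigned type. */
static int looks_negative(size_t n)
{
	return (long)n < 0;
}

int main(void)
{
	int user_len = -1;                 /* e.g. a bogus length from userspace */
	size_t n = (size_t)user_len;       /* becomes SIZE_MAX after conversion  */

	printf("n = %zu  rejected = %d\n", n, looks_negative(n));
	printf("n = %zu  rejected = %d\n", (size_t)128, looks_negative(128));
	return 0;
}
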
349 diff -urNp linux-2.6.32.41/arch/arm/kernel/kgdb.c linux-2.6.32.41/arch/arm/kernel/kgdb.c
350 --- linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
351 +++ linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
352 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
353 * and we handle the normal undef case within the do_undefinstr
354 * handler.
355 */
356 -struct kgdb_arch arch_kgdb_ops = {
357 +const struct kgdb_arch arch_kgdb_ops = {
358 #ifndef __ARMEB__
359 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
360 #else /* ! __ARMEB__ */
361 diff -urNp linux-2.6.32.41/arch/arm/mach-at91/pm.c linux-2.6.32.41/arch/arm/mach-at91/pm.c
362 --- linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
363 +++ linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
364 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
365 }
366
367
368 -static struct platform_suspend_ops at91_pm_ops ={
369 +static const struct platform_suspend_ops at91_pm_ops ={
370 .valid = at91_pm_valid_state,
371 .begin = at91_pm_begin,
372 .enter = at91_pm_enter,
373 diff -urNp linux-2.6.32.41/arch/arm/mach-omap1/pm.c linux-2.6.32.41/arch/arm/mach-omap1/pm.c
374 --- linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
375 +++ linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
376 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
377
378
379
380 -static struct platform_suspend_ops omap_pm_ops ={
381 +static const struct platform_suspend_ops omap_pm_ops ={
382 .prepare = omap_pm_prepare,
383 .enter = omap_pm_enter,
384 .finish = omap_pm_finish,
385 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c
386 --- linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
387 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
388 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
389 enable_hlt();
390 }
391
392 -static struct platform_suspend_ops omap_pm_ops = {
393 +static const struct platform_suspend_ops omap_pm_ops = {
394 .prepare = omap2_pm_prepare,
395 .enter = omap2_pm_enter,
396 .finish = omap2_pm_finish,
397 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c
398 --- linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
399 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
400 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
401 return;
402 }
403
404 -static struct platform_suspend_ops omap_pm_ops = {
405 +static const struct platform_suspend_ops omap_pm_ops = {
406 .begin = omap3_pm_begin,
407 .end = omap3_pm_end,
408 .prepare = omap3_pm_prepare,
409 diff -urNp linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c
410 --- linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
411 +++ linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
412 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
413 (state == PM_SUSPEND_MEM);
414 }
415
416 -static struct platform_suspend_ops pnx4008_pm_ops = {
417 +static const struct platform_suspend_ops pnx4008_pm_ops = {
418 .enter = pnx4008_pm_enter,
419 .valid = pnx4008_pm_valid,
420 };
421 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/pm.c linux-2.6.32.41/arch/arm/mach-pxa/pm.c
422 --- linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
423 +++ linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
424 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
425 pxa_cpu_pm_fns->finish();
426 }
427
428 -static struct platform_suspend_ops pxa_pm_ops = {
429 +static const struct platform_suspend_ops pxa_pm_ops = {
430 .valid = pxa_pm_valid,
431 .enter = pxa_pm_enter,
432 .prepare = pxa_pm_prepare,
433 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c
434 --- linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
435 +++ linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
436 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
437 }
438
439 #ifdef CONFIG_PM
440 -static struct platform_suspend_ops sharpsl_pm_ops = {
441 +static const struct platform_suspend_ops sharpsl_pm_ops = {
442 .prepare = pxa_pm_prepare,
443 .finish = pxa_pm_finish,
444 .enter = corgi_pxa_pm_enter,
445 diff -urNp linux-2.6.32.41/arch/arm/mach-sa1100/pm.c linux-2.6.32.41/arch/arm/mach-sa1100/pm.c
446 --- linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
447 +++ linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
448 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
449 return virt_to_phys(sp);
450 }
451
452 -static struct platform_suspend_ops sa11x0_pm_ops = {
453 +static const struct platform_suspend_ops sa11x0_pm_ops = {
454 .enter = sa11x0_pm_enter,
455 .valid = suspend_valid_only_mem,
456 };
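
The platform_suspend_ops changes above (and the dma_map_ops, sysfs_ops and kgdb_arch changes elsewhere in the patch) all apply the same hardening step: operation tables made up of function pointers are declared const so the compiler can place them in read-only memory, where they can neither be reassigned by mistake nor overwritten at runtime to redirect control flow. A small standalone example of the same idea; the names are made up for illustration:

#include <stdio.h>

struct pm_ops_demo {
	int (*enter)(void);
};

static int demo_enter(void)
{
	puts("entering suspend (demo)");
	return 0;
}

/* Declared const: the table typically ends up in a read-only section, so a
 * stray or hostile runtime store to demo_ops.enter faults, and an accidental
 * assignment such as "demo_ops.enter = other_fn;" is rejected at compile time. */
static const struct pm_ops_demo demo_ops = {
	.enter = demo_enter,
};

int main(void)
{
	return demo_ops.enter();
}
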
457 diff -urNp linux-2.6.32.41/arch/arm/mm/fault.c linux-2.6.32.41/arch/arm/mm/fault.c
458 --- linux-2.6.32.41/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
459 +++ linux-2.6.32.41/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
460 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
461 }
462 #endif
463
464 +#ifdef CONFIG_PAX_PAGEEXEC
465 + if (fsr & FSR_LNX_PF) {
466 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
467 + do_group_exit(SIGKILL);
468 + }
469 +#endif
470 +
471 tsk->thread.address = addr;
472 tsk->thread.error_code = fsr;
473 tsk->thread.trap_no = 14;
474 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
475 }
476 #endif /* CONFIG_MMU */
477
478 +#ifdef CONFIG_PAX_PAGEEXEC
479 +void pax_report_insns(void *pc, void *sp)
480 +{
481 + long i;
482 +
483 + printk(KERN_ERR "PAX: bytes at PC: ");
484 + for (i = 0; i < 20; i++) {
485 + unsigned char c;
486 + if (get_user(c, (__force unsigned char __user *)pc+i))
487 + printk(KERN_CONT "?? ");
488 + else
489 + printk(KERN_CONT "%02x ", c);
490 + }
491 + printk("\n");
492 +
493 + printk(KERN_ERR "PAX: bytes at SP-4: ");
494 + for (i = -1; i < 20; i++) {
495 + unsigned long c;
496 + if (get_user(c, (__force unsigned long __user *)sp+i))
497 + printk(KERN_CONT "???????? ");
498 + else
499 + printk(KERN_CONT "%08lx ", c);
500 + }
501 + printk("\n");
502 +}
503 +#endif
504 +
505 /*
506 * First Level Translation Fault Handler
507 *
508 diff -urNp linux-2.6.32.41/arch/arm/mm/mmap.c linux-2.6.32.41/arch/arm/mm/mmap.c
509 --- linux-2.6.32.41/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
510 +++ linux-2.6.32.41/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
511 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
512 if (len > TASK_SIZE)
513 return -ENOMEM;
514
515 +#ifdef CONFIG_PAX_RANDMMAP
516 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
517 +#endif
518 +
519 if (addr) {
520 if (do_align)
521 addr = COLOUR_ALIGN(addr, pgoff);
522 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
523 addr = PAGE_ALIGN(addr);
524
525 vma = find_vma(mm, addr);
526 - if (TASK_SIZE - len >= addr &&
527 - (!vma || addr + len <= vma->vm_start))
528 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
529 return addr;
530 }
531 if (len > mm->cached_hole_size) {
532 - start_addr = addr = mm->free_area_cache;
533 + start_addr = addr = mm->free_area_cache;
534 } else {
535 - start_addr = addr = TASK_UNMAPPED_BASE;
536 - mm->cached_hole_size = 0;
537 + start_addr = addr = mm->mmap_base;
538 + mm->cached_hole_size = 0;
539 }
540
541 full_search:
542 @@ -94,14 +97,14 @@ full_search:
543 * Start a new search - just in case we missed
544 * some holes.
545 */
546 - if (start_addr != TASK_UNMAPPED_BASE) {
547 - start_addr = addr = TASK_UNMAPPED_BASE;
548 + if (start_addr != mm->mmap_base) {
549 + start_addr = addr = mm->mmap_base;
550 mm->cached_hole_size = 0;
551 goto full_search;
552 }
553 return -ENOMEM;
554 }
555 - if (!vma || addr + len <= vma->vm_start) {
556 + if (check_heap_stack_gap(vma, addr, len)) {
557 /*
558 * Remember the place where we stopped the search:
559 */
560 diff -urNp linux-2.6.32.41/arch/arm/plat-s3c/pm.c linux-2.6.32.41/arch/arm/plat-s3c/pm.c
561 --- linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
562 +++ linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
563 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
564 s3c_pm_check_cleanup();
565 }
566
567 -static struct platform_suspend_ops s3c_pm_ops = {
568 +static const struct platform_suspend_ops s3c_pm_ops = {
569 .enter = s3c_pm_enter,
570 .prepare = s3c_pm_prepare,
571 .finish = s3c_pm_finish,
572 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/elf.h linux-2.6.32.41/arch/avr32/include/asm/elf.h
573 --- linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
574 +++ linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
575 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
576 the loader. We need to make sure that it is out of the way of the program
577 that it will "exec", and that there is sufficient room for the brk. */
578
579 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
580 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
581
582 +#ifdef CONFIG_PAX_ASLR
583 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
584 +
585 +#define PAX_DELTA_MMAP_LEN 15
586 +#define PAX_DELTA_STACK_LEN 15
587 +#endif
588
589 /* This yields a mask that user programs can use to figure out what
590 instruction set this CPU supports. This could be done in user space,
591 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h
592 --- linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
593 +++ linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
594 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
595 D(11) KM_IRQ1,
596 D(12) KM_SOFTIRQ0,
597 D(13) KM_SOFTIRQ1,
598 -D(14) KM_TYPE_NR
599 +D(14) KM_CLEARPAGE,
600 +D(15) KM_TYPE_NR
601 };
602
603 #undef D
604 diff -urNp linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c
605 --- linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
606 +++ linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
607 @@ -176,7 +176,7 @@ out:
608 return 0;
609 }
610
611 -static struct platform_suspend_ops avr32_pm_ops = {
612 +static const struct platform_suspend_ops avr32_pm_ops = {
613 .valid = avr32_pm_valid_state,
614 .enter = avr32_pm_enter,
615 };
616 diff -urNp linux-2.6.32.41/arch/avr32/mm/fault.c linux-2.6.32.41/arch/avr32/mm/fault.c
617 --- linux-2.6.32.41/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
618 +++ linux-2.6.32.41/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
619 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
620
621 int exception_trace = 1;
622
623 +#ifdef CONFIG_PAX_PAGEEXEC
624 +void pax_report_insns(void *pc, void *sp)
625 +{
626 + unsigned long i;
627 +
628 + printk(KERN_ERR "PAX: bytes at PC: ");
629 + for (i = 0; i < 20; i++) {
630 + unsigned char c;
631 + if (get_user(c, (unsigned char *)pc+i))
632 + printk(KERN_CONT "???????? ");
633 + else
634 + printk(KERN_CONT "%02x ", c);
635 + }
636 + printk("\n");
637 +}
638 +#endif
639 +
640 /*
641 * This routine handles page faults. It determines the address and the
642 * problem, and then passes it off to one of the appropriate routines.
643 @@ -157,6 +174,16 @@ bad_area:
644 up_read(&mm->mmap_sem);
645
646 if (user_mode(regs)) {
647 +
648 +#ifdef CONFIG_PAX_PAGEEXEC
649 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
650 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
651 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
652 + do_group_exit(SIGKILL);
653 + }
654 + }
655 +#endif
656 +
657 if (exception_trace && printk_ratelimit())
658 printk("%s%s[%d]: segfault at %08lx pc %08lx "
659 "sp %08lx ecr %lu\n",
660 diff -urNp linux-2.6.32.41/arch/blackfin/kernel/kgdb.c linux-2.6.32.41/arch/blackfin/kernel/kgdb.c
661 --- linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
662 +++ linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
663 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
664 return -1; /* this means that we do not want to exit from the handler */
665 }
666
667 -struct kgdb_arch arch_kgdb_ops = {
668 +const struct kgdb_arch arch_kgdb_ops = {
669 .gdb_bpt_instr = {0xa1},
670 #ifdef CONFIG_SMP
671 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
672 diff -urNp linux-2.6.32.41/arch/blackfin/mach-common/pm.c linux-2.6.32.41/arch/blackfin/mach-common/pm.c
673 --- linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
674 +++ linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
675 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
676 return 0;
677 }
678
679 -struct platform_suspend_ops bfin_pm_ops = {
680 +const struct platform_suspend_ops bfin_pm_ops = {
681 .enter = bfin_pm_enter,
682 .valid = bfin_pm_valid,
683 };
684 diff -urNp linux-2.6.32.41/arch/frv/include/asm/kmap_types.h linux-2.6.32.41/arch/frv/include/asm/kmap_types.h
685 --- linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
686 +++ linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
687 @@ -23,6 +23,7 @@ enum km_type {
688 KM_IRQ1,
689 KM_SOFTIRQ0,
690 KM_SOFTIRQ1,
691 + KM_CLEARPAGE,
692 KM_TYPE_NR
693 };
694
695 diff -urNp linux-2.6.32.41/arch/frv/mm/elf-fdpic.c linux-2.6.32.41/arch/frv/mm/elf-fdpic.c
696 --- linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
697 +++ linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
698 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
699 if (addr) {
700 addr = PAGE_ALIGN(addr);
701 vma = find_vma(current->mm, addr);
702 - if (TASK_SIZE - len >= addr &&
703 - (!vma || addr + len <= vma->vm_start))
704 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
705 goto success;
706 }
707
708 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
709 for (; vma; vma = vma->vm_next) {
710 if (addr > limit)
711 break;
712 - if (addr + len <= vma->vm_start)
713 + if (check_heap_stack_gap(vma, addr, len))
714 goto success;
715 addr = vma->vm_end;
716 }
717 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
718 for (; vma; vma = vma->vm_next) {
719 if (addr > limit)
720 break;
721 - if (addr + len <= vma->vm_start)
722 + if (check_heap_stack_gap(vma, addr, len))
723 goto success;
724 addr = vma->vm_end;
725 }
726 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c
727 --- linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
728 +++ linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
729 @@ -17,7 +17,7 @@
730 #include <linux/swiotlb.h>
731 #include <asm/machvec.h>
732
733 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
734 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
735
736 /* swiotlb declarations & definitions: */
737 extern int swiotlb_late_init_with_default_size (size_t size);
738 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
739 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
740 }
741
742 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
743 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
744 {
745 if (use_swiotlb(dev))
746 return &swiotlb_dma_ops;
747 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c
748 --- linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
749 +++ linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
750 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
751 },
752 };
753
754 -extern struct dma_map_ops swiotlb_dma_ops;
755 +extern const struct dma_map_ops swiotlb_dma_ops;
756
757 static int __init
758 sba_init(void)
759 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
760
761 __setup("sbapagesize=",sba_page_override);
762
763 -struct dma_map_ops sba_dma_ops = {
764 +const struct dma_map_ops sba_dma_ops = {
765 .alloc_coherent = sba_alloc_coherent,
766 .free_coherent = sba_free_coherent,
767 .map_page = sba_map_page,
768 diff -urNp linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c
769 --- linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
770 +++ linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
771 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
772
773 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
774
775 +#ifdef CONFIG_PAX_ASLR
776 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
777 +
778 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
779 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
780 +#endif
781 +
782 /* Ugly but avoids duplication */
783 #include "../../../fs/binfmt_elf.c"
784
785 diff -urNp linux-2.6.32.41/arch/ia64/ia32/ia32priv.h linux-2.6.32.41/arch/ia64/ia32/ia32priv.h
786 --- linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
787 +++ linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
788 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
789 #define ELF_DATA ELFDATA2LSB
790 #define ELF_ARCH EM_386
791
792 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
793 +#ifdef CONFIG_PAX_RANDUSTACK
794 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
795 +#else
796 +#define __IA32_DELTA_STACK 0UL
797 +#endif
798 +
799 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
800 +
801 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
802 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
803
804 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h
805 --- linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
806 +++ linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
807 @@ -12,7 +12,7 @@
808
809 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
810
811 -extern struct dma_map_ops *dma_ops;
812 +extern const struct dma_map_ops *dma_ops;
813 extern struct ia64_machine_vector ia64_mv;
814 extern void set_iommu_machvec(void);
815
816 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
817 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
818 dma_addr_t *daddr, gfp_t gfp)
819 {
820 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
821 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
822 void *caddr;
823
824 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
825 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
826 static inline void dma_free_coherent(struct device *dev, size_t size,
827 void *caddr, dma_addr_t daddr)
828 {
829 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
830 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
831 debug_dma_free_coherent(dev, size, caddr, daddr);
832 ops->free_coherent(dev, size, caddr, daddr);
833 }
834 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
835
836 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
837 {
838 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
839 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
840 return ops->mapping_error(dev, daddr);
841 }
842
843 static inline int dma_supported(struct device *dev, u64 mask)
844 {
845 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
846 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
847 return ops->dma_supported(dev, mask);
848 }
849
850 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/elf.h linux-2.6.32.41/arch/ia64/include/asm/elf.h
851 --- linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
852 +++ linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
853 @@ -43,6 +43,13 @@
854 */
855 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
856
857 +#ifdef CONFIG_PAX_ASLR
858 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
859 +
860 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
861 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
862 +#endif
863 +
864 #define PT_IA_64_UNWIND 0x70000001
865
866 /* IA-64 relocations: */
867 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/machvec.h linux-2.6.32.41/arch/ia64/include/asm/machvec.h
868 --- linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
869 +++ linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
870 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
871 /* DMA-mapping interface: */
872 typedef void ia64_mv_dma_init (void);
873 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
874 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
875 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
876
877 /*
878 * WARNING: The legacy I/O space is _architected_. Platforms are
879 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
880 # endif /* CONFIG_IA64_GENERIC */
881
882 extern void swiotlb_dma_init(void);
883 -extern struct dma_map_ops *dma_get_ops(struct device *);
884 +extern const struct dma_map_ops *dma_get_ops(struct device *);
885
886 /*
887 * Define default versions so we can extend machvec for new platforms without having
888 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/pgtable.h linux-2.6.32.41/arch/ia64/include/asm/pgtable.h
889 --- linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
890 +++ linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
891 @@ -12,7 +12,7 @@
892 * David Mosberger-Tang <davidm@hpl.hp.com>
893 */
894
895 -
896 +#include <linux/const.h>
897 #include <asm/mman.h>
898 #include <asm/page.h>
899 #include <asm/processor.h>
900 @@ -143,6 +143,17 @@
901 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
902 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
903 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
904 +
905 +#ifdef CONFIG_PAX_PAGEEXEC
906 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
907 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
908 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
909 +#else
910 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
911 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
912 +# define PAGE_COPY_NOEXEC PAGE_COPY
913 +#endif
914 +
915 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
916 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
917 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
918 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/spinlock.h linux-2.6.32.41/arch/ia64/include/asm/spinlock.h
919 --- linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
920 +++ linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
921 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
922 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
923
924 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
925 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
926 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
927 }
928
929 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
930 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/uaccess.h linux-2.6.32.41/arch/ia64/include/asm/uaccess.h
931 --- linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
932 +++ linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
933 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
934 const void *__cu_from = (from); \
935 long __cu_len = (n); \
936 \
937 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
938 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
939 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
940 __cu_len; \
941 })
942 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
943 long __cu_len = (n); \
944 \
945 __chk_user_ptr(__cu_from); \
946 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
947 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
948 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
949 __cu_len; \
950 })
951 diff -urNp linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c
952 --- linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
953 +++ linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
954 @@ -3,7 +3,7 @@
955 /* Set this to 1 if there is a HW IOMMU in the system */
956 int iommu_detected __read_mostly;
957
958 -struct dma_map_ops *dma_ops;
959 +const struct dma_map_ops *dma_ops;
960 EXPORT_SYMBOL(dma_ops);
961
962 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
963 @@ -16,7 +16,7 @@ static int __init dma_init(void)
964 }
965 fs_initcall(dma_init);
966
967 -struct dma_map_ops *dma_get_ops(struct device *dev)
968 +const struct dma_map_ops *dma_get_ops(struct device *dev)
969 {
970 return dma_ops;
971 }
972 diff -urNp linux-2.6.32.41/arch/ia64/kernel/module.c linux-2.6.32.41/arch/ia64/kernel/module.c
973 --- linux-2.6.32.41/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
974 +++ linux-2.6.32.41/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
975 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
976 void
977 module_free (struct module *mod, void *module_region)
978 {
979 - if (mod && mod->arch.init_unw_table &&
980 - module_region == mod->module_init) {
981 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
982 unw_remove_unwind_table(mod->arch.init_unw_table);
983 mod->arch.init_unw_table = NULL;
984 }
985 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
986 }
987
988 static inline int
989 +in_init_rx (const struct module *mod, uint64_t addr)
990 +{
991 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
992 +}
993 +
994 +static inline int
995 +in_init_rw (const struct module *mod, uint64_t addr)
996 +{
997 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
998 +}
999 +
1000 +static inline int
1001 in_init (const struct module *mod, uint64_t addr)
1002 {
1003 - return addr - (uint64_t) mod->module_init < mod->init_size;
1004 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1005 +}
1006 +
1007 +static inline int
1008 +in_core_rx (const struct module *mod, uint64_t addr)
1009 +{
1010 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1011 +}
1012 +
1013 +static inline int
1014 +in_core_rw (const struct module *mod, uint64_t addr)
1015 +{
1016 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1017 }
1018
1019 static inline int
1020 in_core (const struct module *mod, uint64_t addr)
1021 {
1022 - return addr - (uint64_t) mod->module_core < mod->core_size;
1023 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1024 }
1025
1026 static inline int
1027 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1028 break;
1029
1030 case RV_BDREL:
1031 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1032 + if (in_init_rx(mod, val))
1033 + val -= (uint64_t) mod->module_init_rx;
1034 + else if (in_init_rw(mod, val))
1035 + val -= (uint64_t) mod->module_init_rw;
1036 + else if (in_core_rx(mod, val))
1037 + val -= (uint64_t) mod->module_core_rx;
1038 + else if (in_core_rw(mod, val))
1039 + val -= (uint64_t) mod->module_core_rw;
1040 break;
1041
1042 case RV_LTV:
1043 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1044 * addresses have been selected...
1045 */
1046 uint64_t gp;
1047 - if (mod->core_size > MAX_LTOFF)
1048 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1049 /*
1050 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1051 * at the end of the module.
1052 */
1053 - gp = mod->core_size - MAX_LTOFF / 2;
1054 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1055 else
1056 - gp = mod->core_size / 2;
1057 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1058 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1059 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1060 mod->arch.gp = gp;
1061 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1062 }
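
The ia64 module.c hunks above track the patch's split of each module's init and core allocations into separate RX (code) and RW (data) regions (the module struct changes themselves are outside this excerpt), so every address classification now tests both halves. The range tests use the form "addr - base < size", which covers both "below base" and "at or past the end" with a single unsigned comparison. A standalone check of that trick:

#include <assert.h>
#include <stdio.h>

/* The single-comparison range test used by in_init_rx()/in_core_rw() above:
 * the subtraction is unsigned, so addr < base wraps to a huge value and fails
 * the comparison exactly as addr >= base + size does. */
static int in_region(unsigned long addr, unsigned long base, unsigned long size)
{
	return addr - base < size;
}

int main(void)
{
	unsigned long base = 0x10000, size = 0x4000;

	assert( in_region(base,            base, size));   /* first byte   */
	assert( in_region(base + size - 1, base, size));   /* last byte    */
	assert(!in_region(base + size,     base, size));   /* one past end */
	assert(!in_region(base - 1,        base, size));   /* below: wraps */
	puts("single-comparison range check holds");
	return 0;
}
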
1063 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-dma.c linux-2.6.32.41/arch/ia64/kernel/pci-dma.c
1064 --- linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1065 +++ linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1066 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1067 .dma_mask = &fallback_dev.coherent_dma_mask,
1068 };
1069
1070 -extern struct dma_map_ops intel_dma_ops;
1071 +extern const struct dma_map_ops intel_dma_ops;
1072
1073 static int __init pci_iommu_init(void)
1074 {
1075 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1076 }
1077 EXPORT_SYMBOL(iommu_dma_supported);
1078
1079 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1080 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1081 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1082 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1083 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1084 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1085 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1086 +
1087 +static const struct dma_map_ops intel_iommu_dma_ops = {
1088 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1089 + .alloc_coherent = intel_alloc_coherent,
1090 + .free_coherent = intel_free_coherent,
1091 + .map_sg = intel_map_sg,
1092 + .unmap_sg = intel_unmap_sg,
1093 + .map_page = intel_map_page,
1094 + .unmap_page = intel_unmap_page,
1095 + .mapping_error = intel_mapping_error,
1096 +
1097 + .sync_single_for_cpu = machvec_dma_sync_single,
1098 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1099 + .sync_single_for_device = machvec_dma_sync_single,
1100 + .sync_sg_for_device = machvec_dma_sync_sg,
1101 + .dma_supported = iommu_dma_supported,
1102 +};
1103 +
1104 void __init pci_iommu_alloc(void)
1105 {
1106 - dma_ops = &intel_dma_ops;
1107 -
1108 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1109 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1110 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1111 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1112 - dma_ops->dma_supported = iommu_dma_supported;
1113 + dma_ops = &intel_iommu_dma_ops;
1114
1115 /*
1116 * The order of these functions is important for
1117 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c
1118 --- linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1119 +++ linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1120 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1121 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1122 }
1123
1124 -struct dma_map_ops swiotlb_dma_ops = {
1125 +const struct dma_map_ops swiotlb_dma_ops = {
1126 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1127 .free_coherent = swiotlb_free_coherent,
1128 .map_page = swiotlb_map_page,
1129 diff -urNp linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c
1130 --- linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1131 +++ linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1132 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1133 if (REGION_NUMBER(addr) == RGN_HPAGE)
1134 addr = 0;
1135 #endif
1136 +
1137 +#ifdef CONFIG_PAX_RANDMMAP
1138 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1139 + addr = mm->free_area_cache;
1140 + else
1141 +#endif
1142 +
1143 if (!addr)
1144 addr = mm->free_area_cache;
1145
1146 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1147 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1148 /* At this point: (!vma || addr < vma->vm_end). */
1149 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1150 - if (start_addr != TASK_UNMAPPED_BASE) {
1151 + if (start_addr != mm->mmap_base) {
1152 /* Start a new search --- just in case we missed some holes. */
1153 - addr = TASK_UNMAPPED_BASE;
1154 + addr = mm->mmap_base;
1155 goto full_search;
1156 }
1157 return -ENOMEM;
1158 }
1159 - if (!vma || addr + len <= vma->vm_start) {
1160 + if (check_heap_stack_gap(vma, addr, len)) {
1161 /* Remember the address where we stopped this search: */
1162 mm->free_area_cache = addr + len;
1163 return addr;
1164 diff -urNp linux-2.6.32.41/arch/ia64/kernel/topology.c linux-2.6.32.41/arch/ia64/kernel/topology.c
1165 --- linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1166 +++ linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1167 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1168 return ret;
1169 }
1170
1171 -static struct sysfs_ops cache_sysfs_ops = {
1172 +static const struct sysfs_ops cache_sysfs_ops = {
1173 .show = cache_show
1174 };
1175
1176 diff -urNp linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S
1177 --- linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1178 +++ linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1179 @@ -190,7 +190,7 @@ SECTIONS
1180 /* Per-cpu data: */
1181 . = ALIGN(PERCPU_PAGE_SIZE);
1182 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1183 - __phys_per_cpu_start = __per_cpu_load;
1184 + __phys_per_cpu_start = per_cpu_load;
1185 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1186 * into percpu page size
1187 */
1188 diff -urNp linux-2.6.32.41/arch/ia64/mm/fault.c linux-2.6.32.41/arch/ia64/mm/fault.c
1189 --- linux-2.6.32.41/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1190 +++ linux-2.6.32.41/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1191 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1192 return pte_present(pte);
1193 }
1194
1195 +#ifdef CONFIG_PAX_PAGEEXEC
1196 +void pax_report_insns(void *pc, void *sp)
1197 +{
1198 + unsigned long i;
1199 +
1200 + printk(KERN_ERR "PAX: bytes at PC: ");
1201 + for (i = 0; i < 8; i++) {
1202 + unsigned int c;
1203 + if (get_user(c, (unsigned int *)pc+i))
1204 + printk(KERN_CONT "???????? ");
1205 + else
1206 + printk(KERN_CONT "%08x ", c);
1207 + }
1208 + printk("\n");
1209 +}
1210 +#endif
1211 +
1212 void __kprobes
1213 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1214 {
1215 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1216 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1217 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1218
1219 - if ((vma->vm_flags & mask) != mask)
1220 + if ((vma->vm_flags & mask) != mask) {
1221 +
1222 +#ifdef CONFIG_PAX_PAGEEXEC
1223 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1224 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1225 + goto bad_area;
1226 +
1227 + up_read(&mm->mmap_sem);
1228 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1229 + do_group_exit(SIGKILL);
1230 + }
1231 +#endif
1232 +
1233 goto bad_area;
1234
1235 + }
1236 +
1237 survive:
1238 /*
1239 * If for any reason at all we couldn't handle the fault, make
1240 diff -urNp linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c
1241 --- linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1242 +++ linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1243 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1244 /* At this point: (!vmm || addr < vmm->vm_end). */
1245 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1246 return -ENOMEM;
1247 - if (!vmm || (addr + len) <= vmm->vm_start)
1248 + if (check_heap_stack_gap(vmm, addr, len))
1249 return addr;
1250 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1251 }
1252 diff -urNp linux-2.6.32.41/arch/ia64/mm/init.c linux-2.6.32.41/arch/ia64/mm/init.c
1253 --- linux-2.6.32.41/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1254 +++ linux-2.6.32.41/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1255 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1256 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1257 vma->vm_end = vma->vm_start + PAGE_SIZE;
1258 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1259 +
1260 +#ifdef CONFIG_PAX_PAGEEXEC
1261 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1262 + vma->vm_flags &= ~VM_EXEC;
1263 +
1264 +#ifdef CONFIG_PAX_MPROTECT
1265 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1266 + vma->vm_flags &= ~VM_MAYEXEC;
1267 +#endif
1268 +
1269 + }
1270 +#endif
1271 +
1272 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1273 down_write(&current->mm->mmap_sem);
1274 if (insert_vm_struct(current->mm, vma)) {
1275 diff -urNp linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c
1276 --- linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1277 +++ linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1278 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1279 return ret;
1280 }
1281
1282 -static struct dma_map_ops sn_dma_ops = {
1283 +static const struct dma_map_ops sn_dma_ops = {
1284 .alloc_coherent = sn_dma_alloc_coherent,
1285 .free_coherent = sn_dma_free_coherent,
1286 .map_page = sn_dma_map_page,
1287 diff -urNp linux-2.6.32.41/arch/m32r/lib/usercopy.c linux-2.6.32.41/arch/m32r/lib/usercopy.c
1288 --- linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1289 +++ linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1290 @@ -14,6 +14,9 @@
1291 unsigned long
1292 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1293 {
1294 + if ((long)n < 0)
1295 + return n;
1296 +
1297 prefetch(from);
1298 if (access_ok(VERIFY_WRITE, to, n))
1299 __copy_user(to,from,n);
1300 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1301 unsigned long
1302 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1303 {
1304 + if ((long)n < 0)
1305 + return n;
1306 +
1307 prefetchw(to);
1308 if (access_ok(VERIFY_READ, from, n))
1309 __copy_user_zeroing(to,from,n);
1310 diff -urNp linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c
1311 --- linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1312 +++ linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1313 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1314
1315 }
1316
1317 -static struct platform_suspend_ops db1x_pm_ops = {
1318 +static const struct platform_suspend_ops db1x_pm_ops = {
1319 .valid = suspend_valid_only_mem,
1320 .begin = db1x_pm_begin,
1321 .enter = db1x_pm_enter,
1322 diff -urNp linux-2.6.32.41/arch/mips/include/asm/elf.h linux-2.6.32.41/arch/mips/include/asm/elf.h
1323 --- linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1324 +++ linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1325 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1326 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1327 #endif
1328
1329 +#ifdef CONFIG_PAX_ASLR
1330 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1331 +
1332 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1333 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1334 +#endif
1335 +
1336 #endif /* _ASM_ELF_H */
1337 diff -urNp linux-2.6.32.41/arch/mips/include/asm/page.h linux-2.6.32.41/arch/mips/include/asm/page.h
1338 --- linux-2.6.32.41/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1339 +++ linux-2.6.32.41/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1340 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1341 #ifdef CONFIG_CPU_MIPS32
1342 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1343 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1344 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1345 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1346 #else
1347 typedef struct { unsigned long long pte; } pte_t;
1348 #define pte_val(x) ((x).pte)
1349 diff -urNp linux-2.6.32.41/arch/mips/include/asm/system.h linux-2.6.32.41/arch/mips/include/asm/system.h
1350 --- linux-2.6.32.41/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1351 +++ linux-2.6.32.41/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1352 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1353 */
1354 #define __ARCH_WANT_UNLOCKED_CTXSW
1355
1356 -extern unsigned long arch_align_stack(unsigned long sp);
1357 +#define arch_align_stack(x) ((x) & ~0xfUL)
1358
1359 #endif /* _ASM_SYSTEM_H */
1360 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c
1361 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1362 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1363 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1364 #undef ELF_ET_DYN_BASE
1365 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1366
1367 +#ifdef CONFIG_PAX_ASLR
1368 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1369 +
1370 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1371 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1372 +#endif
1373 +
1374 #include <asm/processor.h>
1375 #include <linux/module.h>
1376 #include <linux/elfcore.h>
1377 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c
1378 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1379 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1380 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1381 #undef ELF_ET_DYN_BASE
1382 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1383
1384 +#ifdef CONFIG_PAX_ASLR
1385 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1386 +
1387 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1388 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1389 +#endif
1390 +
1391 #include <asm/processor.h>
1392
1393 /*
1394 diff -urNp linux-2.6.32.41/arch/mips/kernel/kgdb.c linux-2.6.32.41/arch/mips/kernel/kgdb.c
1395 --- linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1396 +++ linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1397 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1398 return -1;
1399 }
1400
1401 +/* cannot be const */
1402 struct kgdb_arch arch_kgdb_ops;
1403
1404 /*
1405 diff -urNp linux-2.6.32.41/arch/mips/kernel/process.c linux-2.6.32.41/arch/mips/kernel/process.c
1406 --- linux-2.6.32.41/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1407 +++ linux-2.6.32.41/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1408 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1409 out:
1410 return pc;
1411 }
1412 -
1413 -/*
1414 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1415 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1416 - */
1417 -unsigned long arch_align_stack(unsigned long sp)
1418 -{
1419 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1420 - sp -= get_random_int() & ~PAGE_MASK;
1421 -
1422 - return sp & ALMASK;
1423 -}
1424 diff -urNp linux-2.6.32.41/arch/mips/kernel/syscall.c linux-2.6.32.41/arch/mips/kernel/syscall.c
1425 --- linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1426 +++ linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1427 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1428 do_color_align = 0;
1429 if (filp || (flags & MAP_SHARED))
1430 do_color_align = 1;
1431 +
1432 +#ifdef CONFIG_PAX_RANDMMAP
1433 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1434 +#endif
1435 +
1436 if (addr) {
1437 if (do_color_align)
1438 addr = COLOUR_ALIGN(addr, pgoff);
1439 else
1440 addr = PAGE_ALIGN(addr);
1441 vmm = find_vma(current->mm, addr);
1442 - if (task_size - len >= addr &&
1443 - (!vmm || addr + len <= vmm->vm_start))
1444 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1445 return addr;
1446 }
1447 - addr = TASK_UNMAPPED_BASE;
1448 + addr = current->mm->mmap_base;
1449 if (do_color_align)
1450 addr = COLOUR_ALIGN(addr, pgoff);
1451 else
1452 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1453 /* At this point: (!vmm || addr < vmm->vm_end). */
1454 if (task_size - len < addr)
1455 return -ENOMEM;
1456 - if (!vmm || addr + len <= vmm->vm_start)
1457 + if (check_heap_stack_gap(vmm, addr, len))
1458 return addr;
1459 addr = vmm->vm_end;
1460 if (do_color_align)
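
Note on the arch_get_unmapped_area() hunks above: the open-coded "!vmm || addr + len <= vmm->vm_start" test is replaced by check_heap_stack_gap(), which also keeps a guard gap in front of a following stack VMA, and the search now starts at the (possibly randomised) mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE. The helper itself is defined elsewhere in this patch; the userspace sketch below is a simplified reading of it, with struct vma, grows_down and STACK_GUARD_GAP all being assumed stand-ins rather than the patch's real definitions.

#include <stdbool.h>

struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;		/* stands in for VM_GROWSDOWN */
};

#define STACK_GUARD_GAP (256UL << 12)	/* hypothetical guard size */

/* True if [addr, addr + len) fits below 'vma' while keeping a gap in
 * front of a downward-growing stack. */
bool check_heap_stack_gap(const struct vma *vma, unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (vma->grows_down)
		return addr + len + STACK_GUARD_GAP <= vma->vm_start;
	return addr + len <= vma->vm_start;
}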
1461 diff -urNp linux-2.6.32.41/arch/mips/mm/fault.c linux-2.6.32.41/arch/mips/mm/fault.c
1462 --- linux-2.6.32.41/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1463 +++ linux-2.6.32.41/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1464 @@ -26,6 +26,23 @@
1465 #include <asm/ptrace.h>
1466 #include <asm/highmem.h> /* For VMALLOC_END */
1467
1468 +#ifdef CONFIG_PAX_PAGEEXEC
1469 +void pax_report_insns(void *pc, void *sp)
1470 +{
1471 + unsigned long i;
1472 +
1473 + printk(KERN_ERR "PAX: bytes at PC: ");
1474 + for (i = 0; i < 5; i++) {
1475 + unsigned int c;
1476 + if (get_user(c, (unsigned int *)pc+i))
1477 + printk(KERN_CONT "???????? ");
1478 + else
1479 + printk(KERN_CONT "%08x ", c);
1480 + }
1481 + printk("\n");
1482 +}
1483 +#endif
1484 +
1485 /*
1486 * This routine handles page faults. It determines the address,
1487 * and the problem, and then passes it off to one of the appropriate
1488 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/elf.h linux-2.6.32.41/arch/parisc/include/asm/elf.h
1489 --- linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1490 +++ linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1491 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1492
1493 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1494
1495 +#ifdef CONFIG_PAX_ASLR
1496 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1497 +
1498 +#define PAX_DELTA_MMAP_LEN 16
1499 +#define PAX_DELTA_STACK_LEN 16
1500 +#endif
1501 +
1502 /* This yields a mask that user programs can use to figure out what
1503 instruction set this CPU supports. This could be done in user space,
1504 but it's not easy, and we've already done it here. */
1505 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/pgtable.h linux-2.6.32.41/arch/parisc/include/asm/pgtable.h
1506 --- linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1507 +++ linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1508 @@ -207,6 +207,17 @@
1509 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1510 #define PAGE_COPY PAGE_EXECREAD
1511 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1512 +
1513 +#ifdef CONFIG_PAX_PAGEEXEC
1514 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1515 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1516 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1517 +#else
1518 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1519 +# define PAGE_COPY_NOEXEC PAGE_COPY
1520 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1521 +#endif
1522 +
1523 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1524 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1525 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1526 diff -urNp linux-2.6.32.41/arch/parisc/kernel/module.c linux-2.6.32.41/arch/parisc/kernel/module.c
1527 --- linux-2.6.32.41/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1528 +++ linux-2.6.32.41/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1529 @@ -95,16 +95,38 @@
1530
1531 /* three functions to determine where in the module core
1532 * or init pieces the location is */
1533 +static inline int in_init_rx(struct module *me, void *loc)
1534 +{
1535 + return (loc >= me->module_init_rx &&
1536 + loc < (me->module_init_rx + me->init_size_rx));
1537 +}
1538 +
1539 +static inline int in_init_rw(struct module *me, void *loc)
1540 +{
1541 + return (loc >= me->module_init_rw &&
1542 + loc < (me->module_init_rw + me->init_size_rw));
1543 +}
1544 +
1545 static inline int in_init(struct module *me, void *loc)
1546 {
1547 - return (loc >= me->module_init &&
1548 - loc <= (me->module_init + me->init_size));
1549 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1550 +}
1551 +
1552 +static inline int in_core_rx(struct module *me, void *loc)
1553 +{
1554 + return (loc >= me->module_core_rx &&
1555 + loc < (me->module_core_rx + me->core_size_rx));
1556 +}
1557 +
1558 +static inline int in_core_rw(struct module *me, void *loc)
1559 +{
1560 + return (loc >= me->module_core_rw &&
1561 + loc < (me->module_core_rw + me->core_size_rw));
1562 }
1563
1564 static inline int in_core(struct module *me, void *loc)
1565 {
1566 - return (loc >= me->module_core &&
1567 - loc <= (me->module_core + me->core_size));
1568 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1569 }
1570
1571 static inline int in_local(struct module *me, void *loc)
1572 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1573 }
1574
1575 /* align things a bit */
1576 - me->core_size = ALIGN(me->core_size, 16);
1577 - me->arch.got_offset = me->core_size;
1578 - me->core_size += gots * sizeof(struct got_entry);
1579 -
1580 - me->core_size = ALIGN(me->core_size, 16);
1581 - me->arch.fdesc_offset = me->core_size;
1582 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1583 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1584 + me->arch.got_offset = me->core_size_rw;
1585 + me->core_size_rw += gots * sizeof(struct got_entry);
1586 +
1587 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1588 + me->arch.fdesc_offset = me->core_size_rw;
1589 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1590
1591 me->arch.got_max = gots;
1592 me->arch.fdesc_max = fdescs;
1593 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1594
1595 BUG_ON(value == 0);
1596
1597 - got = me->module_core + me->arch.got_offset;
1598 + got = me->module_core_rw + me->arch.got_offset;
1599 for (i = 0; got[i].addr; i++)
1600 if (got[i].addr == value)
1601 goto out;
1602 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1603 #ifdef CONFIG_64BIT
1604 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1605 {
1606 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1607 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1608
1609 if (!value) {
1610 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1611 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1612
1613 /* Create new one */
1614 fdesc->addr = value;
1615 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1616 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1617 return (Elf_Addr)fdesc;
1618 }
1619 #endif /* CONFIG_64BIT */
1620 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1621
1622 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1623 end = table + sechdrs[me->arch.unwind_section].sh_size;
1624 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1625 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1626
1627 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1628 me->arch.unwind_section, table, end, gp);
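
Note on the parisc module loader hunks above: the module's memory is split into a read-execute region (module_core_rx / module_init_rx) and a read-write region (module_core_rw / module_init_rw); range checks such as in_core() become the union of the two sub-checks, and writable data like the GOT and function descriptors is accounted against core_size_rw only. The replacement also tightens the original "<=" bound to "<", making the check a proper half-open range. A minimal sketch of that range logic, using assumed field names that mirror the patch rather than the vanilla struct module:

#include <stdbool.h>

struct mod_layout {
	char *core_rx;  unsigned long core_size_rx;	/* code, read-only data */
	char *core_rw;  unsigned long core_size_rw;	/* writable data, GOT, fdescs */
};

static bool in_range(const char *base, unsigned long size, const void *loc)
{
	return (const char *)loc >= base && (const char *)loc < base + size;
}

bool in_core(const struct mod_layout *m, const void *loc)
{
	return in_range(m->core_rx, m->core_size_rx, loc) ||
	       in_range(m->core_rw, m->core_size_rw, loc);
}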
1629 diff -urNp linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c
1630 --- linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1631 +++ linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1632 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1633 /* At this point: (!vma || addr < vma->vm_end). */
1634 if (TASK_SIZE - len < addr)
1635 return -ENOMEM;
1636 - if (!vma || addr + len <= vma->vm_start)
1637 + if (check_heap_stack_gap(vma, addr, len))
1638 return addr;
1639 addr = vma->vm_end;
1640 }
1641 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1642 /* At this point: (!vma || addr < vma->vm_end). */
1643 if (TASK_SIZE - len < addr)
1644 return -ENOMEM;
1645 - if (!vma || addr + len <= vma->vm_start)
1646 + if (check_heap_stack_gap(vma, addr, len))
1647 return addr;
1648 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1649 if (addr < vma->vm_end) /* handle wraparound */
1650 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1651 if (flags & MAP_FIXED)
1652 return addr;
1653 if (!addr)
1654 - addr = TASK_UNMAPPED_BASE;
1655 + addr = current->mm->mmap_base;
1656
1657 if (filp) {
1658 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1659 diff -urNp linux-2.6.32.41/arch/parisc/kernel/traps.c linux-2.6.32.41/arch/parisc/kernel/traps.c
1660 --- linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1661 +++ linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1662 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1663
1664 down_read(&current->mm->mmap_sem);
1665 vma = find_vma(current->mm,regs->iaoq[0]);
1666 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1667 - && (vma->vm_flags & VM_EXEC)) {
1668 -
1669 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1670 fault_address = regs->iaoq[0];
1671 fault_space = regs->iasq[0];
1672
1673 diff -urNp linux-2.6.32.41/arch/parisc/mm/fault.c linux-2.6.32.41/arch/parisc/mm/fault.c
1674 --- linux-2.6.32.41/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1675 +++ linux-2.6.32.41/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1676 @@ -15,6 +15,7 @@
1677 #include <linux/sched.h>
1678 #include <linux/interrupt.h>
1679 #include <linux/module.h>
1680 +#include <linux/unistd.h>
1681
1682 #include <asm/uaccess.h>
1683 #include <asm/traps.h>
1684 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1685 static unsigned long
1686 parisc_acctyp(unsigned long code, unsigned int inst)
1687 {
1688 - if (code == 6 || code == 16)
1689 + if (code == 6 || code == 7 || code == 16)
1690 return VM_EXEC;
1691
1692 switch (inst & 0xf0000000) {
1693 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1694 }
1695 #endif
1696
1697 +#ifdef CONFIG_PAX_PAGEEXEC
1698 +/*
1699 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1700 + *
1701 + * returns 1 when task should be killed
1702 + * 2 when rt_sigreturn trampoline was detected
1703 + * 3 when unpatched PLT trampoline was detected
1704 + */
1705 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1706 +{
1707 +
1708 +#ifdef CONFIG_PAX_EMUPLT
1709 + int err;
1710 +
1711 + do { /* PaX: unpatched PLT emulation */
1712 + unsigned int bl, depwi;
1713 +
1714 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1715 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1716 +
1717 + if (err)
1718 + break;
1719 +
1720 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1721 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1722 +
1723 + err = get_user(ldw, (unsigned int *)addr);
1724 + err |= get_user(bv, (unsigned int *)(addr+4));
1725 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1726 +
1727 + if (err)
1728 + break;
1729 +
1730 + if (ldw == 0x0E801096U &&
1731 + bv == 0xEAC0C000U &&
1732 + ldw2 == 0x0E881095U)
1733 + {
1734 + unsigned int resolver, map;
1735 +
1736 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1737 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1738 + if (err)
1739 + break;
1740 +
1741 + regs->gr[20] = instruction_pointer(regs)+8;
1742 + regs->gr[21] = map;
1743 + regs->gr[22] = resolver;
1744 + regs->iaoq[0] = resolver | 3UL;
1745 + regs->iaoq[1] = regs->iaoq[0] + 4;
1746 + return 3;
1747 + }
1748 + }
1749 + } while (0);
1750 +#endif
1751 +
1752 +#ifdef CONFIG_PAX_EMUTRAMP
1753 +
1754 +#ifndef CONFIG_PAX_EMUSIGRT
1755 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1756 + return 1;
1757 +#endif
1758 +
1759 + do { /* PaX: rt_sigreturn emulation */
1760 + unsigned int ldi1, ldi2, bel, nop;
1761 +
1762 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1763 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1764 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1765 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1766 +
1767 + if (err)
1768 + break;
1769 +
1770 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1771 + ldi2 == 0x3414015AU &&
1772 + bel == 0xE4008200U &&
1773 + nop == 0x08000240U)
1774 + {
1775 + regs->gr[25] = (ldi1 & 2) >> 1;
1776 + regs->gr[20] = __NR_rt_sigreturn;
1777 + regs->gr[31] = regs->iaoq[1] + 16;
1778 + regs->sr[0] = regs->iasq[1];
1779 + regs->iaoq[0] = 0x100UL;
1780 + regs->iaoq[1] = regs->iaoq[0] + 4;
1781 + regs->iasq[0] = regs->sr[2];
1782 + regs->iasq[1] = regs->sr[2];
1783 + return 2;
1784 + }
1785 + } while (0);
1786 +#endif
1787 +
1788 + return 1;
1789 +}
1790 +
1791 +void pax_report_insns(void *pc, void *sp)
1792 +{
1793 + unsigned long i;
1794 +
1795 + printk(KERN_ERR "PAX: bytes at PC: ");
1796 + for (i = 0; i < 5; i++) {
1797 + unsigned int c;
1798 + if (get_user(c, (unsigned int *)pc+i))
1799 + printk(KERN_CONT "???????? ");
1800 + else
1801 + printk(KERN_CONT "%08x ", c);
1802 + }
1803 + printk("\n");
1804 +}
1805 +#endif
1806 +
1807 int fixup_exception(struct pt_regs *regs)
1808 {
1809 const struct exception_table_entry *fix;
1810 @@ -192,8 +303,33 @@ good_area:
1811
1812 acc_type = parisc_acctyp(code,regs->iir);
1813
1814 - if ((vma->vm_flags & acc_type) != acc_type)
1815 + if ((vma->vm_flags & acc_type) != acc_type) {
1816 +
1817 +#ifdef CONFIG_PAX_PAGEEXEC
1818 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1819 + (address & ~3UL) == instruction_pointer(regs))
1820 + {
1821 + up_read(&mm->mmap_sem);
1822 + switch (pax_handle_fetch_fault(regs)) {
1823 +
1824 +#ifdef CONFIG_PAX_EMUPLT
1825 + case 3:
1826 + return;
1827 +#endif
1828 +
1829 +#ifdef CONFIG_PAX_EMUTRAMP
1830 + case 2:
1831 + return;
1832 +#endif
1833 +
1834 + }
1835 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1836 + do_group_exit(SIGKILL);
1837 + }
1838 +#endif
1839 +
1840 goto bad_area;
1841 + }
1842
1843 /*
1844 * If for any reason at all we couldn't handle the fault, make
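
Note on the parisc fault handler hunks above: pax_handle_fetch_fault() recognises two specific instruction sequences at the faulting PC, an unpatched PLT call and the rt_sigreturn trampoline, and emulates them instead of killing the task; anything else is reported via pax_report_insns(), which dumps five instruction words at the PC before the task is killed. The sketch below is a userspace approximation of that dump loop only, with a plain memory read standing in for the kernel's get_user().

#include <stdio.h>

/* Print five 32-bit words starting at pc, the way pax_report_insns()
 * logs "PAX: bytes at PC: xxxxxxxx ...". The kernel version uses
 * get_user() and prints "????????" for unreadable words. */
void report_insns(const unsigned int *pc)
{
	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 5; i++)
		printf("%08x ", pc[i]);
	printf("\n");
}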
1845 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/device.h linux-2.6.32.41/arch/powerpc/include/asm/device.h
1846 --- linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1847 +++ linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1848 @@ -14,7 +14,7 @@ struct dev_archdata {
1849 struct device_node *of_node;
1850
1851 /* DMA operations on that device */
1852 - struct dma_map_ops *dma_ops;
1853 + const struct dma_map_ops *dma_ops;
1854
1855 /*
1856 * When an iommu is in use, dma_data is used as a ptr to the base of the
1857 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h
1858 --- linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1859 +++ linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1860 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1861 #ifdef CONFIG_PPC64
1862 extern struct dma_map_ops dma_iommu_ops;
1863 #endif
1864 -extern struct dma_map_ops dma_direct_ops;
1865 +extern const struct dma_map_ops dma_direct_ops;
1866
1867 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1868 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1869 {
1870 /* We don't handle the NULL dev case for ISA for now. We could
1871 * do it via an out of line call but it is not needed for now. The
1872 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1873 return dev->archdata.dma_ops;
1874 }
1875
1876 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1877 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1878 {
1879 dev->archdata.dma_ops = ops;
1880 }
1881 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1882
1883 static inline int dma_supported(struct device *dev, u64 mask)
1884 {
1885 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1886 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1887
1888 if (unlikely(dma_ops == NULL))
1889 return 0;
1890 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1891
1892 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1893 {
1894 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1895 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1896
1897 if (unlikely(dma_ops == NULL))
1898 return -EIO;
1899 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1900 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1901 dma_addr_t *dma_handle, gfp_t flag)
1902 {
1903 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1904 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1905 void *cpu_addr;
1906
1907 BUG_ON(!dma_ops);
1908 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1909 static inline void dma_free_coherent(struct device *dev, size_t size,
1910 void *cpu_addr, dma_addr_t dma_handle)
1911 {
1912 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1913 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1914
1915 BUG_ON(!dma_ops);
1916
1917 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1918
1919 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1920 {
1921 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1922 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1923
1924 if (dma_ops->mapping_error)
1925 return dma_ops->mapping_error(dev, dma_addr);
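
Note on the dma-mapping.h hunks above: constifying dma_map_ops (and the get_dma_ops()/set_dma_ops() accessors) lets the compiler place these function-pointer tables in a read-only section, so a kernel write primitive cannot be used to redirect the DMA callbacks. The same pattern reduced to a standalone sketch, with a made-up ops structure:

#include <stdio.h>

struct ops {
	void (*hello)(void);
};

static void say_hello(void) { puts("hello"); }

/* 'const' moves the table into .rodata; assigning to its members is a
 * compile-time error rather than a runtime hijack. */
static const struct ops my_ops = {
	.hello = say_hello,
};

int main(void)
{
	const struct ops *o = &my_ops;	/* consumers take a const pointer, as in the patch */

	o->hello();
	return 0;
}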
1926 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/elf.h linux-2.6.32.41/arch/powerpc/include/asm/elf.h
1927 --- linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1928 +++ linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1929 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1930 the loader. We need to make sure that it is out of the way of the program
1931 that it will "exec", and that there is sufficient room for the brk. */
1932
1933 -extern unsigned long randomize_et_dyn(unsigned long base);
1934 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1935 +#define ELF_ET_DYN_BASE (0x20000000)
1936 +
1937 +#ifdef CONFIG_PAX_ASLR
1938 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1939 +
1940 +#ifdef __powerpc64__
1941 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1942 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1943 +#else
1944 +#define PAX_DELTA_MMAP_LEN 15
1945 +#define PAX_DELTA_STACK_LEN 15
1946 +#endif
1947 +#endif
1948
1949 /*
1950 * Our registers are always unsigned longs, whether we're a 32 bit
1951 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1952 (0x7ff >> (PAGE_SHIFT - 12)) : \
1953 (0x3ffff >> (PAGE_SHIFT - 12)))
1954
1955 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1956 -#define arch_randomize_brk arch_randomize_brk
1957 -
1958 #endif /* __KERNEL__ */
1959
1960 /*
1961 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/iommu.h linux-2.6.32.41/arch/powerpc/include/asm/iommu.h
1962 --- linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1963 +++ linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1964 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1965 extern void iommu_init_early_dart(void);
1966 extern void iommu_init_early_pasemi(void);
1967
1968 +/* dma-iommu.c */
1969 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1970 +
1971 #ifdef CONFIG_PCI
1972 extern void pci_iommu_init(void);
1973 extern void pci_direct_iommu_init(void);
1974 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h
1975 --- linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1976 +++ linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1977 @@ -26,6 +26,7 @@ enum km_type {
1978 KM_SOFTIRQ1,
1979 KM_PPC_SYNC_PAGE,
1980 KM_PPC_SYNC_ICACHE,
1981 + KM_CLEARPAGE,
1982 KM_TYPE_NR
1983 };
1984
1985 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page_64.h linux-2.6.32.41/arch/powerpc/include/asm/page_64.h
1986 --- linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
1987 +++ linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
1988 @@ -180,15 +180,18 @@ do { \
1989 * stack by default, so in the absense of a PT_GNU_STACK program header
1990 * we turn execute permission off.
1991 */
1992 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1993 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1994 +#define VM_STACK_DEFAULT_FLAGS32 \
1995 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1996 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1997
1998 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1999 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2000
2001 +#ifndef CONFIG_PAX_PAGEEXEC
2002 #define VM_STACK_DEFAULT_FLAGS \
2003 (test_thread_flag(TIF_32BIT) ? \
2004 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2005 +#endif
2006
2007 #include <asm-generic/getorder.h>
2008
2009 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page.h linux-2.6.32.41/arch/powerpc/include/asm/page.h
2010 --- linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2011 +++ linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2012 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2013 * and needs to be executable. This means the whole heap ends
2014 * up being executable.
2015 */
2016 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2017 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2018 +#define VM_DATA_DEFAULT_FLAGS32 \
2019 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2020 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2021
2022 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2023 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2024 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2025 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2026 #endif
2027
2028 +#define ktla_ktva(addr) (addr)
2029 +#define ktva_ktla(addr) (addr)
2030 +
2031 #ifndef __ASSEMBLY__
2032
2033 #undef STRICT_MM_TYPECHECKS
2034 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pci.h linux-2.6.32.41/arch/powerpc/include/asm/pci.h
2035 --- linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2036 +++ linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2037 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2038 }
2039
2040 #ifdef CONFIG_PCI
2041 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2042 -extern struct dma_map_ops *get_pci_dma_ops(void);
2043 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2044 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2045 #else /* CONFIG_PCI */
2046 #define set_pci_dma_ops(d)
2047 #define get_pci_dma_ops() NULL
2048 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h
2049 --- linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2050 +++ linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2051 @@ -2,6 +2,7 @@
2052 #define _ASM_POWERPC_PGTABLE_H
2053 #ifdef __KERNEL__
2054
2055 +#include <linux/const.h>
2056 #ifndef __ASSEMBLY__
2057 #include <asm/processor.h> /* For TASK_SIZE */
2058 #include <asm/mmu.h>
2059 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h
2060 --- linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2061 +++ linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2062 @@ -21,6 +21,7 @@
2063 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2064 #define _PAGE_USER 0x004 /* usermode access allowed */
2065 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2066 +#define _PAGE_EXEC _PAGE_GUARDED
2067 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2068 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2069 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2070 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/reg.h linux-2.6.32.41/arch/powerpc/include/asm/reg.h
2071 --- linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2072 +++ linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2073 @@ -191,6 +191,7 @@
2074 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2075 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2076 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2077 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2078 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2079 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2080 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2081 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h
2082 --- linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2083 +++ linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2084 @@ -13,7 +13,7 @@
2085
2086 #include <linux/swiotlb.h>
2087
2088 -extern struct dma_map_ops swiotlb_dma_ops;
2089 +extern const struct dma_map_ops swiotlb_dma_ops;
2090
2091 static inline void dma_mark_clean(void *addr, size_t size) {}
2092
2093 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/system.h linux-2.6.32.41/arch/powerpc/include/asm/system.h
2094 --- linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2095 +++ linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2096 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2097 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2098 #endif
2099
2100 -extern unsigned long arch_align_stack(unsigned long sp);
2101 +#define arch_align_stack(x) ((x) & ~0xfUL)
2102
2103 /* Used in very early kernel initialization. */
2104 extern unsigned long reloc_offset(void);
2105 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h
2106 --- linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2107 +++ linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2108 @@ -13,6 +13,8 @@
2109 #define VERIFY_READ 0
2110 #define VERIFY_WRITE 1
2111
2112 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2113 +
2114 /*
2115 * The fs value determines whether argument validity checking should be
2116 * performed or not. If get_fs() == USER_DS, checking is performed, with
2117 @@ -327,52 +329,6 @@ do { \
2118 extern unsigned long __copy_tofrom_user(void __user *to,
2119 const void __user *from, unsigned long size);
2120
2121 -#ifndef __powerpc64__
2122 -
2123 -static inline unsigned long copy_from_user(void *to,
2124 - const void __user *from, unsigned long n)
2125 -{
2126 - unsigned long over;
2127 -
2128 - if (access_ok(VERIFY_READ, from, n))
2129 - return __copy_tofrom_user((__force void __user *)to, from, n);
2130 - if ((unsigned long)from < TASK_SIZE) {
2131 - over = (unsigned long)from + n - TASK_SIZE;
2132 - return __copy_tofrom_user((__force void __user *)to, from,
2133 - n - over) + over;
2134 - }
2135 - return n;
2136 -}
2137 -
2138 -static inline unsigned long copy_to_user(void __user *to,
2139 - const void *from, unsigned long n)
2140 -{
2141 - unsigned long over;
2142 -
2143 - if (access_ok(VERIFY_WRITE, to, n))
2144 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2145 - if ((unsigned long)to < TASK_SIZE) {
2146 - over = (unsigned long)to + n - TASK_SIZE;
2147 - return __copy_tofrom_user(to, (__force void __user *)from,
2148 - n - over) + over;
2149 - }
2150 - return n;
2151 -}
2152 -
2153 -#else /* __powerpc64__ */
2154 -
2155 -#define __copy_in_user(to, from, size) \
2156 - __copy_tofrom_user((to), (from), (size))
2157 -
2158 -extern unsigned long copy_from_user(void *to, const void __user *from,
2159 - unsigned long n);
2160 -extern unsigned long copy_to_user(void __user *to, const void *from,
2161 - unsigned long n);
2162 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2163 - unsigned long n);
2164 -
2165 -#endif /* __powerpc64__ */
2166 -
2167 static inline unsigned long __copy_from_user_inatomic(void *to,
2168 const void __user *from, unsigned long n)
2169 {
2170 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2171 if (ret == 0)
2172 return 0;
2173 }
2174 +
2175 + if (!__builtin_constant_p(n))
2176 + check_object_size(to, n, false);
2177 +
2178 return __copy_tofrom_user((__force void __user *)to, from, n);
2179 }
2180
2181 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2182 if (ret == 0)
2183 return 0;
2184 }
2185 +
2186 + if (!__builtin_constant_p(n))
2187 + check_object_size(from, n, true);
2188 +
2189 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2190 }
2191
2192 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2193 return __copy_to_user_inatomic(to, from, size);
2194 }
2195
2196 +#ifndef __powerpc64__
2197 +
2198 +static inline unsigned long __must_check copy_from_user(void *to,
2199 + const void __user *from, unsigned long n)
2200 +{
2201 + unsigned long over;
2202 +
2203 + if ((long)n < 0)
2204 + return n;
2205 +
2206 + if (access_ok(VERIFY_READ, from, n)) {
2207 + if (!__builtin_constant_p(n))
2208 + check_object_size(to, n, false);
2209 + return __copy_tofrom_user((__force void __user *)to, from, n);
2210 + }
2211 + if ((unsigned long)from < TASK_SIZE) {
2212 + over = (unsigned long)from + n - TASK_SIZE;
2213 + if (!__builtin_constant_p(n - over))
2214 + check_object_size(to, n - over, false);
2215 + return __copy_tofrom_user((__force void __user *)to, from,
2216 + n - over) + over;
2217 + }
2218 + return n;
2219 +}
2220 +
2221 +static inline unsigned long __must_check copy_to_user(void __user *to,
2222 + const void *from, unsigned long n)
2223 +{
2224 + unsigned long over;
2225 +
2226 + if ((long)n < 0)
2227 + return n;
2228 +
2229 + if (access_ok(VERIFY_WRITE, to, n)) {
2230 + if (!__builtin_constant_p(n))
2231 + check_object_size(from, n, true);
2232 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2233 + }
2234 + if ((unsigned long)to < TASK_SIZE) {
2235 + over = (unsigned long)to + n - TASK_SIZE;
2236 + if (!__builtin_constant_p(n))
2237 + check_object_size(from, n - over, true);
2238 + return __copy_tofrom_user(to, (__force void __user *)from,
2239 + n - over) + over;
2240 + }
2241 + return n;
2242 +}
2243 +
2244 +#else /* __powerpc64__ */
2245 +
2246 +#define __copy_in_user(to, from, size) \
2247 + __copy_tofrom_user((to), (from), (size))
2248 +
2249 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2250 +{
2251 + if ((long)n < 0 || n > INT_MAX)
2252 + return n;
2253 +
2254 + if (!__builtin_constant_p(n))
2255 + check_object_size(to, n, false);
2256 +
2257 + if (likely(access_ok(VERIFY_READ, from, n)))
2258 + n = __copy_from_user(to, from, n);
2259 + else
2260 + memset(to, 0, n);
2261 + return n;
2262 +}
2263 +
2264 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2265 +{
2266 + if ((long)n < 0 || n > INT_MAX)
2267 + return n;
2268 +
2269 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2270 + if (!__builtin_constant_p(n))
2271 + check_object_size(from, n, true);
2272 + n = __copy_to_user(to, from, n);
2273 + }
2274 + return n;
2275 +}
2276 +
2277 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2278 + unsigned long n);
2279 +
2280 +#endif /* __powerpc64__ */
2281 +
2282 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2283
2284 static inline unsigned long clear_user(void __user *addr, unsigned long size)
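
Note on the reworked powerpc copy_from_user()/copy_to_user() above: two hardening steps are added before the copy is delegated to __copy_tofrom_user(): a sign/overflow check on the length (a negative long would otherwise become an enormous unsigned copy) and a check_object_size() call for non-constant sizes so the usercopy checker can validate the kernel buffer's bounds. Below is a compressed sketch of that control flow; access_ok_stub() and check_object_size_stub() are hypothetical stand-ins, and memcpy() replaces __copy_tofrom_user().

#include <string.h>
#include <limits.h>
#include <stdbool.h>

static bool access_ok_stub(const void *p, unsigned long n) { (void)p; (void)n; return true; }
static void check_object_size_stub(const void *p, unsigned long n) { (void)p; (void)n; }

unsigned long copy_from_user_sketch(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0 || n > INT_MAX)		/* reject sign-flipped or absurd lengths */
		return n;
	check_object_size_stub(to, n);		/* in the patch: only for non-constant n */
	if (access_ok_stub(from, n)) {
		memcpy(to, from, n);		/* stands in for __copy_tofrom_user() */
		return 0;
	}
	memset(to, 0, n);			/* never leave the destination uninitialised */
	return n;
}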
2285 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c
2286 --- linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2287 +++ linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2288 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2289 &cache_assoc_attr,
2290 };
2291
2292 -static struct sysfs_ops cache_index_ops = {
2293 +static const struct sysfs_ops cache_index_ops = {
2294 .show = cache_index_show,
2295 };
2296
2297 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma.c linux-2.6.32.41/arch/powerpc/kernel/dma.c
2298 --- linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2299 +++ linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2300 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2301 }
2302 #endif
2303
2304 -struct dma_map_ops dma_direct_ops = {
2305 +const struct dma_map_ops dma_direct_ops = {
2306 .alloc_coherent = dma_direct_alloc_coherent,
2307 .free_coherent = dma_direct_free_coherent,
2308 .map_sg = dma_direct_map_sg,
2309 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c
2310 --- linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2311 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2312 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2313 }
2314
2315 /* We support DMA to/from any memory page via the iommu */
2316 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2317 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2318 {
2319 struct iommu_table *tbl = get_iommu_table_base(dev);
2320
2321 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c
2322 --- linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2323 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2324 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2325 * map_page, and unmap_page on highmem, use normal dma_ops
2326 * for everything else.
2327 */
2328 -struct dma_map_ops swiotlb_dma_ops = {
2329 +const struct dma_map_ops swiotlb_dma_ops = {
2330 .alloc_coherent = dma_direct_alloc_coherent,
2331 .free_coherent = dma_direct_free_coherent,
2332 .map_sg = swiotlb_map_sg_attrs,
2333 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S
2334 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2335 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2336 @@ -455,6 +455,7 @@ storage_fault_common:
2337 std r14,_DAR(r1)
2338 std r15,_DSISR(r1)
2339 addi r3,r1,STACK_FRAME_OVERHEAD
2340 + bl .save_nvgprs
2341 mr r4,r14
2342 mr r5,r15
2343 ld r14,PACA_EXGEN+EX_R14(r13)
2344 @@ -464,8 +465,7 @@ storage_fault_common:
2345 cmpdi r3,0
2346 bne- 1f
2347 b .ret_from_except_lite
2348 -1: bl .save_nvgprs
2349 - mr r5,r3
2350 +1: mr r5,r3
2351 addi r3,r1,STACK_FRAME_OVERHEAD
2352 ld r4,_DAR(r1)
2353 bl .bad_page_fault
2354 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S
2355 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2356 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2357 @@ -818,10 +818,10 @@ handle_page_fault:
2358 11: ld r4,_DAR(r1)
2359 ld r5,_DSISR(r1)
2360 addi r3,r1,STACK_FRAME_OVERHEAD
2361 + bl .save_nvgprs
2362 bl .do_page_fault
2363 cmpdi r3,0
2364 beq+ 13f
2365 - bl .save_nvgprs
2366 mr r5,r3
2367 addi r3,r1,STACK_FRAME_OVERHEAD
2368 lwz r4,_DAR(r1)
2369 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c
2370 --- linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2371 +++ linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2372 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2373 return 1;
2374 }
2375
2376 -static struct dma_map_ops ibmebus_dma_ops = {
2377 +static const struct dma_map_ops ibmebus_dma_ops = {
2378 .alloc_coherent = ibmebus_alloc_coherent,
2379 .free_coherent = ibmebus_free_coherent,
2380 .map_sg = ibmebus_map_sg,
2381 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/kgdb.c linux-2.6.32.41/arch/powerpc/kernel/kgdb.c
2382 --- linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2383 +++ linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2384 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2385 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2386 return 0;
2387
2388 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2389 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2390 regs->nip += 4;
2391
2392 return 1;
2393 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2394 /*
2395 * Global data
2396 */
2397 -struct kgdb_arch arch_kgdb_ops = {
2398 +const struct kgdb_arch arch_kgdb_ops = {
2399 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2400 };
2401
2402 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module_32.c linux-2.6.32.41/arch/powerpc/kernel/module_32.c
2403 --- linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2404 +++ linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2405 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2406 me->arch.core_plt_section = i;
2407 }
2408 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2409 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2410 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2411 return -ENOEXEC;
2412 }
2413
2414 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2415
2416 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2417 /* Init, or core PLT? */
2418 - if (location >= mod->module_core
2419 - && location < mod->module_core + mod->core_size)
2420 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2421 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2422 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2423 - else
2424 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2425 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2426 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2427 + else {
2428 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2429 + return ~0UL;
2430 + }
2431
2432 /* Find this entry, or if that fails, the next avail. entry */
2433 while (entry->jump[0]) {
2434 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module.c linux-2.6.32.41/arch/powerpc/kernel/module.c
2435 --- linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2436 +++ linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2437 @@ -31,11 +31,24 @@
2438
2439 LIST_HEAD(module_bug_list);
2440
2441 +#ifdef CONFIG_PAX_KERNEXEC
2442 void *module_alloc(unsigned long size)
2443 {
2444 if (size == 0)
2445 return NULL;
2446
2447 + return vmalloc(size);
2448 +}
2449 +
2450 +void *module_alloc_exec(unsigned long size)
2451 +#else
2452 +void *module_alloc(unsigned long size)
2453 +#endif
2454 +
2455 +{
2456 + if (size == 0)
2457 + return NULL;
2458 +
2459 return vmalloc_exec(size);
2460 }
2461
2462 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2463 vfree(module_region);
2464 }
2465
2466 +#ifdef CONFIG_PAX_KERNEXEC
2467 +void module_free_exec(struct module *mod, void *module_region)
2468 +{
2469 + module_free(mod, module_region);
2470 +}
2471 +#endif
2472 +
2473 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2474 const Elf_Shdr *sechdrs,
2475 const char *name)
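
Note on the powerpc module.c hunk above: under CONFIG_PAX_KERNEXEC the loader gets two allocators, module_alloc() returning plain (non-executable) vmalloc memory for module data and module_alloc_exec() keeping vmalloc_exec() for code, so writable and executable module memory never overlap. The shape of that split, sketched with hypothetical allocator stubs in place of vmalloc()/vmalloc_exec():

#include <stdlib.h>

static void *vmalloc_stub(unsigned long size)      { return malloc(size); }
static void *vmalloc_exec_stub(unsigned long size) { return malloc(size); }

void *module_alloc(unsigned long size)		/* RW: module data */
{
	return size ? vmalloc_stub(size) : NULL;
}

void *module_alloc_exec(unsigned long size)	/* RX: module code */
{
	return size ? vmalloc_exec_stub(size) : NULL;
}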
2476 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/pci-common.c linux-2.6.32.41/arch/powerpc/kernel/pci-common.c
2477 --- linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2478 +++ linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2479 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2480 unsigned int ppc_pci_flags = 0;
2481
2482
2483 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2484 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2485
2486 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2487 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2488 {
2489 pci_dma_ops = dma_ops;
2490 }
2491
2492 -struct dma_map_ops *get_pci_dma_ops(void)
2493 +const struct dma_map_ops *get_pci_dma_ops(void)
2494 {
2495 return pci_dma_ops;
2496 }
2497 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/process.c linux-2.6.32.41/arch/powerpc/kernel/process.c
2498 --- linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2499 +++ linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2500 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2501 * Lookup NIP late so we have the best change of getting the
2502 * above info out without failing
2503 */
2504 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2505 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2506 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2507 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2508 #endif
2509 show_stack(current, (unsigned long *) regs->gpr[1]);
2510 if (!user_mode(regs))
2511 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2512 newsp = stack[0];
2513 ip = stack[STACK_FRAME_LR_SAVE];
2514 if (!firstframe || ip != lr) {
2515 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2516 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2517 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2518 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2519 - printk(" (%pS)",
2520 + printk(" (%pA)",
2521 (void *)current->ret_stack[curr_frame].ret);
2522 curr_frame--;
2523 }
2524 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2525 struct pt_regs *regs = (struct pt_regs *)
2526 (sp + STACK_FRAME_OVERHEAD);
2527 lr = regs->link;
2528 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2529 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2530 regs->trap, (void *)regs->nip, (void *)lr);
2531 firstframe = 1;
2532 }
2533 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2534 }
2535
2536 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2537 -
2538 -unsigned long arch_align_stack(unsigned long sp)
2539 -{
2540 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2541 - sp -= get_random_int() & ~PAGE_MASK;
2542 - return sp & ~0xf;
2543 -}
2544 -
2545 -static inline unsigned long brk_rnd(void)
2546 -{
2547 - unsigned long rnd = 0;
2548 -
2549 - /* 8MB for 32bit, 1GB for 64bit */
2550 - if (is_32bit_task())
2551 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2552 - else
2553 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2554 -
2555 - return rnd << PAGE_SHIFT;
2556 -}
2557 -
2558 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2559 -{
2560 - unsigned long base = mm->brk;
2561 - unsigned long ret;
2562 -
2563 -#ifdef CONFIG_PPC_STD_MMU_64
2564 - /*
2565 - * If we are using 1TB segments and we are allowed to randomise
2566 - * the heap, we can put it above 1TB so it is backed by a 1TB
2567 - * segment. Otherwise the heap will be in the bottom 1TB
2568 - * which always uses 256MB segments and this may result in a
2569 - * performance penalty.
2570 - */
2571 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2572 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2573 -#endif
2574 -
2575 - ret = PAGE_ALIGN(base + brk_rnd());
2576 -
2577 - if (ret < mm->brk)
2578 - return mm->brk;
2579 -
2580 - return ret;
2581 -}
2582 -
2583 -unsigned long randomize_et_dyn(unsigned long base)
2584 -{
2585 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2586 -
2587 - if (ret < base)
2588 - return base;
2589 -
2590 - return ret;
2591 -}
2592 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_32.c linux-2.6.32.41/arch/powerpc/kernel/signal_32.c
2593 --- linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2594 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2595 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2596 /* Save user registers on the stack */
2597 frame = &rt_sf->uc.uc_mcontext;
2598 addr = frame;
2599 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2600 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2601 if (save_user_regs(regs, frame, 0, 1))
2602 goto badframe;
2603 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2604 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_64.c linux-2.6.32.41/arch/powerpc/kernel/signal_64.c
2605 --- linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2606 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2607 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2608 current->thread.fpscr.val = 0;
2609
2610 /* Set up to return from userspace. */
2611 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2612 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2613 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2614 } else {
2615 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2616 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c
2617 --- linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2618 +++ linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2619 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2620 if (oldlenp) {
2621 if (!error) {
2622 if (get_user(oldlen, oldlenp) ||
2623 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2624 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2625 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2626 error = -EFAULT;
2627 }
2628 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2629 }
2630 return error;
2631 }
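
Note on the compat_sys_sysctl() hunk above: the previously ignored copy_to_user() of the __unused fields is folded into the same condition as put_user(), so a failed copy now surfaces as -EFAULT instead of being silently dropped. The general pattern of folding user-copy results into one error path, with trivial stand-ins for put_user()/copy_to_user() (both return 0 on success):

#include <string.h>

static int put_user_stub(unsigned long val, unsigned long *uptr)
{
	*uptr = val;
	return 0;
}

static unsigned long copy_to_user_stub(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

int write_results(unsigned long *ulen, unsigned long len,
		  void *uextra, const void *extra, unsigned long n)
{
	int error = 0;

	if (put_user_stub(len, ulen) ||
	    copy_to_user_stub(uextra, extra, n))	/* checked, not fire-and-forget */
		error = -14;				/* -EFAULT */
	return error;
}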
2632 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vdso.c linux-2.6.32.41/arch/powerpc/kernel/vdso.c
2633 --- linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2634 +++ linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2635 @@ -36,6 +36,7 @@
2636 #include <asm/firmware.h>
2637 #include <asm/vdso.h>
2638 #include <asm/vdso_datapage.h>
2639 +#include <asm/mman.h>
2640
2641 #include "setup.h"
2642
2643 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2644 vdso_base = VDSO32_MBASE;
2645 #endif
2646
2647 - current->mm->context.vdso_base = 0;
2648 + current->mm->context.vdso_base = ~0UL;
2649
2650 /* vDSO has a problem and was disabled, just don't "enable" it for the
2651 * process
2652 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2653 vdso_base = get_unmapped_area(NULL, vdso_base,
2654 (vdso_pages << PAGE_SHIFT) +
2655 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2656 - 0, 0);
2657 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2658 if (IS_ERR_VALUE(vdso_base)) {
2659 rc = vdso_base;
2660 goto fail_mmapsem;
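
Note on the vdso.c hunk above: the "no vDSO" state is now recorded as ~0UL rather than 0, so code elsewhere that tests vdso_base (for example the signal trampoline setup changed later in this patch) can no longer confuse "no vDSO" with a mapping near address 0, and the vDSO area is requested with MAP_PRIVATE | MAP_EXECUTABLE so it is treated as an executable mapping by the randomisation logic. A tiny sketch of the sentinel idea only; the reasoning about the motivation is this editor's reading, not stated in the patch.

#define NO_VDSO (~0UL)		/* sentinel: 0 is a plausible user address */

unsigned long vdso_base = NO_VDSO;

int have_vdso(void)
{
	return vdso_base != NO_VDSO;	/* was effectively "vdso_base != 0" before */
}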
2661 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vio.c linux-2.6.32.41/arch/powerpc/kernel/vio.c
2662 --- linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2663 +++ linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2664 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2665 vio_cmo_dealloc(viodev, alloc_size);
2666 }
2667
2668 -struct dma_map_ops vio_dma_mapping_ops = {
2669 +static const struct dma_map_ops vio_dma_mapping_ops = {
2670 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2671 .free_coherent = vio_dma_iommu_free_coherent,
2672 .map_sg = vio_dma_iommu_map_sg,
2673 .unmap_sg = vio_dma_iommu_unmap_sg,
2674 + .dma_supported = dma_iommu_dma_supported,
2675 .map_page = vio_dma_iommu_map_page,
2676 .unmap_page = vio_dma_iommu_unmap_page,
2677
2678 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2679
2680 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2681 {
2682 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2683 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2684 }
2685
2686 diff -urNp linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c
2687 --- linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2688 +++ linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2689 @@ -9,22 +9,6 @@
2690 #include <linux/module.h>
2691 #include <asm/uaccess.h>
2692
2693 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2694 -{
2695 - if (likely(access_ok(VERIFY_READ, from, n)))
2696 - n = __copy_from_user(to, from, n);
2697 - else
2698 - memset(to, 0, n);
2699 - return n;
2700 -}
2701 -
2702 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2703 -{
2704 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2705 - n = __copy_to_user(to, from, n);
2706 - return n;
2707 -}
2708 -
2709 unsigned long copy_in_user(void __user *to, const void __user *from,
2710 unsigned long n)
2711 {
2712 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2713 return n;
2714 }
2715
2716 -EXPORT_SYMBOL(copy_from_user);
2717 -EXPORT_SYMBOL(copy_to_user);
2718 EXPORT_SYMBOL(copy_in_user);
2719
2720 diff -urNp linux-2.6.32.41/arch/powerpc/mm/fault.c linux-2.6.32.41/arch/powerpc/mm/fault.c
2721 --- linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2722 +++ linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2723 @@ -30,6 +30,10 @@
2724 #include <linux/kprobes.h>
2725 #include <linux/kdebug.h>
2726 #include <linux/perf_event.h>
2727 +#include <linux/slab.h>
2728 +#include <linux/pagemap.h>
2729 +#include <linux/compiler.h>
2730 +#include <linux/unistd.h>
2731
2732 #include <asm/firmware.h>
2733 #include <asm/page.h>
2734 @@ -40,6 +44,7 @@
2735 #include <asm/uaccess.h>
2736 #include <asm/tlbflush.h>
2737 #include <asm/siginfo.h>
2738 +#include <asm/ptrace.h>
2739
2740
2741 #ifdef CONFIG_KPROBES
2742 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2743 }
2744 #endif
2745
2746 +#ifdef CONFIG_PAX_PAGEEXEC
2747 +/*
2748 + * PaX: decide what to do with offenders (regs->nip = fault address)
2749 + *
2750 + * returns 1 when task should be killed
2751 + */
2752 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2753 +{
2754 + return 1;
2755 +}
2756 +
2757 +void pax_report_insns(void *pc, void *sp)
2758 +{
2759 + unsigned long i;
2760 +
2761 + printk(KERN_ERR "PAX: bytes at PC: ");
2762 + for (i = 0; i < 5; i++) {
2763 + unsigned int c;
2764 + if (get_user(c, (unsigned int __user *)pc+i))
2765 + printk(KERN_CONT "???????? ");
2766 + else
2767 + printk(KERN_CONT "%08x ", c);
2768 + }
2769 + printk("\n");
2770 +}
2771 +#endif
2772 +
2773 /*
2774 * Check whether the instruction at regs->nip is a store using
2775 * an update addressing form which will update r1.
2776 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2777 * indicate errors in DSISR but can validly be set in SRR1.
2778 */
2779 if (trap == 0x400)
2780 - error_code &= 0x48200000;
2781 + error_code &= 0x58200000;
2782 else
2783 is_write = error_code & DSISR_ISSTORE;
2784 #else
2785 @@ -250,7 +282,7 @@ good_area:
2786 * "undefined". Of those that can be set, this is the only
2787 * one which seems bad.
2788 */
2789 - if (error_code & 0x10000000)
2790 + if (error_code & DSISR_GUARDED)
2791 /* Guarded storage error. */
2792 goto bad_area;
2793 #endif /* CONFIG_8xx */
2794 @@ -265,7 +297,7 @@ good_area:
2795 * processors use the same I/D cache coherency mechanism
2796 * as embedded.
2797 */
2798 - if (error_code & DSISR_PROTFAULT)
2799 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2800 goto bad_area;
2801 #endif /* CONFIG_PPC_STD_MMU */
2802
2803 @@ -335,6 +367,23 @@ bad_area:
2804 bad_area_nosemaphore:
2805 /* User mode accesses cause a SIGSEGV */
2806 if (user_mode(regs)) {
2807 +
2808 +#ifdef CONFIG_PAX_PAGEEXEC
2809 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2810 +#ifdef CONFIG_PPC_STD_MMU
2811 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2812 +#else
2813 + if (is_exec && regs->nip == address) {
2814 +#endif
2815 + switch (pax_handle_fetch_fault(regs)) {
2816 + }
2817 +
2818 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2819 + do_group_exit(SIGKILL);
2820 + }
2821 + }
2822 +#endif
2823 +
2824 _exception(SIGSEGV, regs, code, address);
2825 return 0;
2826 }
2827 diff -urNp linux-2.6.32.41/arch/powerpc/mm/mmap_64.c linux-2.6.32.41/arch/powerpc/mm/mmap_64.c
2828 --- linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2829 +++ linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2830 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2831 */
2832 if (mmap_is_legacy()) {
2833 mm->mmap_base = TASK_UNMAPPED_BASE;
2834 +
2835 +#ifdef CONFIG_PAX_RANDMMAP
2836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2837 + mm->mmap_base += mm->delta_mmap;
2838 +#endif
2839 +
2840 mm->get_unmapped_area = arch_get_unmapped_area;
2841 mm->unmap_area = arch_unmap_area;
2842 } else {
2843 mm->mmap_base = mmap_base();
2844 +
2845 +#ifdef CONFIG_PAX_RANDMMAP
2846 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2847 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2848 +#endif
2849 +
2850 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2851 mm->unmap_area = arch_unmap_area_topdown;
2852 }
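With PAX_RANDMMAP the legacy (bottom-up) layout moves mmap_base up by delta_mmap, while the top-down layout moves it down by delta_mmap + delta_stack so both the mmap area and the stack gap are randomised. The deltas themselves are produced elsewhere in the patch; the helper below is only a sketch of the kind of page-aligned random offset they are assumed to hold (the name and derivation are illustrative, not the patch's code):

	#include <linux/random.h>
	#include <asm/page.h>

	/* Hypothetical helper: a page-aligned offset with `bits` bits of entropy. */
	static unsigned long pax_delta_sketch(unsigned int bits)
	{
		unsigned long delta;

		get_random_bytes(&delta, sizeof(delta));
		delta &= (1UL << bits) - 1;
		return delta << PAGE_SHIFT;
	}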
2853 diff -urNp linux-2.6.32.41/arch/powerpc/mm/slice.c linux-2.6.32.41/arch/powerpc/mm/slice.c
2854 --- linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2855 +++ linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2856 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2857 if ((mm->task_size - len) < addr)
2858 return 0;
2859 vma = find_vma(mm, addr);
2860 - return (!vma || (addr + len) <= vma->vm_start);
2861 + return check_heap_stack_gap(vma, addr, len);
2862 }
2863
2864 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2865 @@ -256,7 +256,7 @@ full_search:
2866 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2867 continue;
2868 }
2869 - if (!vma || addr + len <= vma->vm_start) {
2870 + if (check_heap_stack_gap(vma, addr, len)) {
2871 /*
2872 * Remember the place where we stopped the search:
2873 */
2874 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2875 }
2876 }
2877
2878 - addr = mm->mmap_base;
2879 - while (addr > len) {
2880 + if (mm->mmap_base < len)
2881 + addr = -ENOMEM;
2882 + else
2883 + addr = mm->mmap_base - len;
2884 +
2885 + while (!IS_ERR_VALUE(addr)) {
2886 /* Go down by chunk size */
2887 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2888 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2889
2890 /* Check for hit with different page size */
2891 mask = slice_range_to_mask(addr, len);
2892 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2893 * return with success:
2894 */
2895 vma = find_vma(mm, addr);
2896 - if (!vma || (addr + len) <= vma->vm_start) {
2897 + if (check_heap_stack_gap(vma, addr, len)) {
2898 /* remember the address as a hint for next time */
2899 if (use_cache)
2900 mm->free_area_cache = addr;
2901 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2902 mm->cached_hole_size = vma->vm_start - addr;
2903
2904 /* try just below the current vma->vm_start */
2905 - addr = vma->vm_start;
2906 + addr = skip_heap_stack_gap(vma, len);
2907 }
2908
2909 /*
2910 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2911 if (fixed && addr > (mm->task_size - len))
2912 return -EINVAL;
2913
2914 +#ifdef CONFIG_PAX_RANDMMAP
2915 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2916 + addr = 0;
2917 +#endif
2918 +
2919 /* If hint, make sure it matches our alignment restrictions */
2920 if (!fixed && addr) {
2921 addr = _ALIGN_UP(addr, 1ul << pshift);
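Throughout these mmap-search paths the open-coded test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap(), which additionally keeps a guard gap between a new mapping and a downward-growing stack VMA. The real helper is defined elsewhere in this patch; the version below is only a sketch of that intent (gap size and flag handling are assumptions), assuming <linux/mm.h>:

	static inline int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
						      unsigned long addr, unsigned long len)
	{
		unsigned long gap = 0;

		if (!vma)
			return 1;
		if (vma->vm_flags & VM_GROWSDOWN)
			gap = 64UL << PAGE_SHIFT;	/* illustrative guard size */
		return addr + len + gap <= vma->vm_start;
	}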
2922 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c
2923 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2924 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2925 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2926 lite5200_pm_target_state = PM_SUSPEND_ON;
2927 }
2928
2929 -static struct platform_suspend_ops lite5200_pm_ops = {
2930 +static const struct platform_suspend_ops lite5200_pm_ops = {
2931 .valid = lite5200_pm_valid,
2932 .begin = lite5200_pm_begin,
2933 .prepare = lite5200_pm_prepare,
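This and the following hunks of the same shape constify function-pointer tables (platform_suspend_ops, dma_map_ops, sysfs_ops, kgdb_arch, ...). A const-qualified table can typically be placed in a read-only section, so its pointers cannot be redirected at run time the way a writable table could. In isolation the pattern is simply:

	struct ops {
		int (*enter)(int state);
	};

	static int my_enter(int state)
	{
		return state;
	}

	/* typically ends up in a read-only section instead of writable data */
	static const struct ops my_ops = {
		.enter = my_enter,
	};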
2934 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2935 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2936 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2937 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2938 iounmap(mbar);
2939 }
2940
2941 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2942 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2943 .valid = mpc52xx_pm_valid,
2944 .prepare = mpc52xx_pm_prepare,
2945 .enter = mpc52xx_pm_enter,
2946 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c
2947 --- linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2948 +++ linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2949 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2950 return ret;
2951 }
2952
2953 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2954 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2955 .valid = mpc83xx_suspend_valid,
2956 .begin = mpc83xx_suspend_begin,
2957 .enter = mpc83xx_suspend_enter,
2958 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c
2959 --- linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
2960 +++ linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
2961 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
2962
2963 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2964
2965 -struct dma_map_ops dma_iommu_fixed_ops = {
2966 +const struct dma_map_ops dma_iommu_fixed_ops = {
2967 .alloc_coherent = dma_fixed_alloc_coherent,
2968 .free_coherent = dma_fixed_free_coherent,
2969 .map_sg = dma_fixed_map_sg,
2970 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c
2971 --- linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
2972 +++ linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
2973 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
2974 return mask >= DMA_BIT_MASK(32);
2975 }
2976
2977 -static struct dma_map_ops ps3_sb_dma_ops = {
2978 +static const struct dma_map_ops ps3_sb_dma_ops = {
2979 .alloc_coherent = ps3_alloc_coherent,
2980 .free_coherent = ps3_free_coherent,
2981 .map_sg = ps3_sb_map_sg,
2982 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
2983 .unmap_page = ps3_unmap_page,
2984 };
2985
2986 -static struct dma_map_ops ps3_ioc0_dma_ops = {
2987 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
2988 .alloc_coherent = ps3_alloc_coherent,
2989 .free_coherent = ps3_free_coherent,
2990 .map_sg = ps3_ioc0_map_sg,
2991 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig
2992 --- linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
2993 +++ linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
2994 @@ -2,6 +2,8 @@ config PPC_PSERIES
2995 depends on PPC64 && PPC_BOOK3S
2996 bool "IBM pSeries & new (POWER5-based) iSeries"
2997 select MPIC
2998 + select PCI_MSI
2999 + select XICS
3000 select PPC_I8259
3001 select PPC_RTAS
3002 select RTAS_ERROR_LOGGING
3003 diff -urNp linux-2.6.32.41/arch/s390/include/asm/elf.h linux-2.6.32.41/arch/s390/include/asm/elf.h
3004 --- linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3005 +++ linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3006 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3007 that it will "exec", and that there is sufficient room for the brk. */
3008 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3009
3010 +#ifdef CONFIG_PAX_ASLR
3011 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3012 +
3013 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3014 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3015 +#endif
3016 +
3017 /* This yields a mask that user programs can use to figure out what
3018 instruction set this CPU supports. */
3019
3020 diff -urNp linux-2.6.32.41/arch/s390/include/asm/setup.h linux-2.6.32.41/arch/s390/include/asm/setup.h
3021 --- linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3022 +++ linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3023 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3024 void detect_memory_layout(struct mem_chunk chunk[]);
3025
3026 #ifdef CONFIG_S390_SWITCH_AMODE
3027 -extern unsigned int switch_amode;
3028 +#define switch_amode (1)
3029 #else
3030 #define switch_amode (0)
3031 #endif
3032
3033 #ifdef CONFIG_S390_EXEC_PROTECT
3034 -extern unsigned int s390_noexec;
3035 +#define s390_noexec (1)
3036 #else
3037 #define s390_noexec (0)
3038 #endif
3039 diff -urNp linux-2.6.32.41/arch/s390/include/asm/uaccess.h linux-2.6.32.41/arch/s390/include/asm/uaccess.h
3040 --- linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3041 +++ linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3042 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3043 copy_to_user(void __user *to, const void *from, unsigned long n)
3044 {
3045 might_fault();
3046 +
3047 + if ((long)n < 0)
3048 + return n;
3049 +
3050 if (access_ok(VERIFY_WRITE, to, n))
3051 n = __copy_to_user(to, from, n);
3052 return n;
3053 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3054 static inline unsigned long __must_check
3055 __copy_from_user(void *to, const void __user *from, unsigned long n)
3056 {
3057 + if ((long)n < 0)
3058 + return n;
3059 +
3060 if (__builtin_constant_p(n) && (n <= 256))
3061 return uaccess.copy_from_user_small(n, from, to);
3062 else
3063 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3064 copy_from_user(void *to, const void __user *from, unsigned long n)
3065 {
3066 might_fault();
3067 +
3068 + if ((long)n < 0)
3069 + return n;
3070 +
3071 if (access_ok(VERIFY_READ, from, n))
3072 n = __copy_from_user(to, from, n);
3073 else
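The added "(long)n < 0" checks reject copy sizes whose sign bit is set: a length that went negative in the caller becomes an enormous unsigned count by the time it reaches copy_to_user()/copy_from_user(). A small user-space illustration of the failure mode being guarded against:

	#include <stdio.h>

	int main(void)
	{
		long n = -1;			/* e.g. a miscomputed length */
		unsigned long count = n;	/* what the copy routine would see */

		printf("count = %lu\n", count);	/* 18446744073709551615 on 64-bit */
		return 0;
	}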
3074 diff -urNp linux-2.6.32.41/arch/s390/Kconfig linux-2.6.32.41/arch/s390/Kconfig
3075 --- linux-2.6.32.41/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3076 +++ linux-2.6.32.41/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3077 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3078
3079 config S390_SWITCH_AMODE
3080 bool "Switch kernel/user addressing modes"
3081 + default y
3082 help
3083 This option allows to switch the addressing modes of kernel and user
3084 - space. The kernel parameter switch_amode=on will enable this feature,
3085 - default is disabled. Enabling this (via kernel parameter) on machines
3086 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3087 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3088 + will reduce system performance.
3089
3090 Note that this option will also be selected by selecting the execute
3091 - protection option below. Enabling the execute protection via the
3092 - noexec kernel parameter will also switch the addressing modes,
3093 - independent of the switch_amode kernel parameter.
3094 + protection option below. Enabling the execute protection will also
3095 + switch the addressing modes, independent of this option.
3096
3097
3098 config S390_EXEC_PROTECT
3099 bool "Data execute protection"
3100 + default y
3101 select S390_SWITCH_AMODE
3102 help
3103 This option allows to enable a buffer overflow protection for user
3104 space programs and it also selects the addressing mode option above.
3105 - The kernel parameter noexec=on will enable this feature and also
3106 - switch the addressing modes, default is disabled. Enabling this (via
3107 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3108 - will reduce system performance.
3109 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3110 + reduce system performance.
3111
3112 comment "Code generation options"
3113
3114 diff -urNp linux-2.6.32.41/arch/s390/kernel/module.c linux-2.6.32.41/arch/s390/kernel/module.c
3115 --- linux-2.6.32.41/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3116 +++ linux-2.6.32.41/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3117 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3118
3119 /* Increase core size by size of got & plt and set start
3120 offsets for got and plt. */
3121 - me->core_size = ALIGN(me->core_size, 4);
3122 - me->arch.got_offset = me->core_size;
3123 - me->core_size += me->arch.got_size;
3124 - me->arch.plt_offset = me->core_size;
3125 - me->core_size += me->arch.plt_size;
3126 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3127 + me->arch.got_offset = me->core_size_rw;
3128 + me->core_size_rw += me->arch.got_size;
3129 + me->arch.plt_offset = me->core_size_rx;
3130 + me->core_size_rx += me->arch.plt_size;
3131 return 0;
3132 }
3133
3134 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3135 if (info->got_initialized == 0) {
3136 Elf_Addr *gotent;
3137
3138 - gotent = me->module_core + me->arch.got_offset +
3139 + gotent = me->module_core_rw + me->arch.got_offset +
3140 info->got_offset;
3141 *gotent = val;
3142 info->got_initialized = 1;
3143 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3144 else if (r_type == R_390_GOTENT ||
3145 r_type == R_390_GOTPLTENT)
3146 *(unsigned int *) loc =
3147 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3148 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3149 else if (r_type == R_390_GOT64 ||
3150 r_type == R_390_GOTPLT64)
3151 *(unsigned long *) loc = val;
3152 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3153 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3154 if (info->plt_initialized == 0) {
3155 unsigned int *ip;
3156 - ip = me->module_core + me->arch.plt_offset +
3157 + ip = me->module_core_rx + me->arch.plt_offset +
3158 info->plt_offset;
3159 #ifndef CONFIG_64BIT
3160 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3161 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3162 val - loc + 0xffffUL < 0x1ffffeUL) ||
3163 (r_type == R_390_PLT32DBL &&
3164 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3165 - val = (Elf_Addr) me->module_core +
3166 + val = (Elf_Addr) me->module_core_rx +
3167 me->arch.plt_offset +
3168 info->plt_offset;
3169 val += rela->r_addend - loc;
3170 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3171 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3172 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3173 val = val + rela->r_addend -
3174 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3175 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3176 if (r_type == R_390_GOTOFF16)
3177 *(unsigned short *) loc = val;
3178 else if (r_type == R_390_GOTOFF32)
3179 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3180 break;
3181 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3182 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3183 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3184 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3185 rela->r_addend - loc;
3186 if (r_type == R_390_GOTPC)
3187 *(unsigned int *) loc = val;
3188 diff -urNp linux-2.6.32.41/arch/s390/kernel/setup.c linux-2.6.32.41/arch/s390/kernel/setup.c
3189 --- linux-2.6.32.41/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3190 +++ linux-2.6.32.41/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3191 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3192 early_param("mem", early_parse_mem);
3193
3194 #ifdef CONFIG_S390_SWITCH_AMODE
3195 -unsigned int switch_amode = 0;
3196 -EXPORT_SYMBOL_GPL(switch_amode);
3197 -
3198 static int set_amode_and_uaccess(unsigned long user_amode,
3199 unsigned long user32_amode)
3200 {
3201 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3202 return 0;
3203 }
3204 }
3205 -
3206 -/*
3207 - * Switch kernel/user addressing modes?
3208 - */
3209 -static int __init early_parse_switch_amode(char *p)
3210 -{
3211 - switch_amode = 1;
3212 - return 0;
3213 -}
3214 -early_param("switch_amode", early_parse_switch_amode);
3215 -
3216 #else /* CONFIG_S390_SWITCH_AMODE */
3217 static inline int set_amode_and_uaccess(unsigned long user_amode,
3218 unsigned long user32_amode)
3219 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3220 }
3221 #endif /* CONFIG_S390_SWITCH_AMODE */
3222
3223 -#ifdef CONFIG_S390_EXEC_PROTECT
3224 -unsigned int s390_noexec = 0;
3225 -EXPORT_SYMBOL_GPL(s390_noexec);
3226 -
3227 -/*
3228 - * Enable execute protection?
3229 - */
3230 -static int __init early_parse_noexec(char *p)
3231 -{
3232 - if (!strncmp(p, "off", 3))
3233 - return 0;
3234 - switch_amode = 1;
3235 - s390_noexec = 1;
3236 - return 0;
3237 -}
3238 -early_param("noexec", early_parse_noexec);
3239 -#endif /* CONFIG_S390_EXEC_PROTECT */
3240 -
3241 static void setup_addressing_mode(void)
3242 {
3243 if (s390_noexec) {
3244 diff -urNp linux-2.6.32.41/arch/s390/mm/mmap.c linux-2.6.32.41/arch/s390/mm/mmap.c
3245 --- linux-2.6.32.41/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3246 +++ linux-2.6.32.41/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3247 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3248 */
3249 if (mmap_is_legacy()) {
3250 mm->mmap_base = TASK_UNMAPPED_BASE;
3251 +
3252 +#ifdef CONFIG_PAX_RANDMMAP
3253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3254 + mm->mmap_base += mm->delta_mmap;
3255 +#endif
3256 +
3257 mm->get_unmapped_area = arch_get_unmapped_area;
3258 mm->unmap_area = arch_unmap_area;
3259 } else {
3260 mm->mmap_base = mmap_base();
3261 +
3262 +#ifdef CONFIG_PAX_RANDMMAP
3263 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3264 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3265 +#endif
3266 +
3267 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3268 mm->unmap_area = arch_unmap_area_topdown;
3269 }
3270 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3271 */
3272 if (mmap_is_legacy()) {
3273 mm->mmap_base = TASK_UNMAPPED_BASE;
3274 +
3275 +#ifdef CONFIG_PAX_RANDMMAP
3276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3277 + mm->mmap_base += mm->delta_mmap;
3278 +#endif
3279 +
3280 mm->get_unmapped_area = s390_get_unmapped_area;
3281 mm->unmap_area = arch_unmap_area;
3282 } else {
3283 mm->mmap_base = mmap_base();
3284 +
3285 +#ifdef CONFIG_PAX_RANDMMAP
3286 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3287 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3288 +#endif
3289 +
3290 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3291 mm->unmap_area = arch_unmap_area_topdown;
3292 }
3293 diff -urNp linux-2.6.32.41/arch/score/include/asm/system.h linux-2.6.32.41/arch/score/include/asm/system.h
3294 --- linux-2.6.32.41/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3295 +++ linux-2.6.32.41/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3296 @@ -17,7 +17,7 @@ do { \
3297 #define finish_arch_switch(prev) do {} while (0)
3298
3299 typedef void (*vi_handler_t)(void);
3300 -extern unsigned long arch_align_stack(unsigned long sp);
3301 +#define arch_align_stack(x) (x)
3302
3303 #define mb() barrier()
3304 #define rmb() barrier()
3305 diff -urNp linux-2.6.32.41/arch/score/kernel/process.c linux-2.6.32.41/arch/score/kernel/process.c
3306 --- linux-2.6.32.41/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3307 +++ linux-2.6.32.41/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3308 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3309
3310 return task_pt_regs(task)->cp0_epc;
3311 }
3312 -
3313 -unsigned long arch_align_stack(unsigned long sp)
3314 -{
3315 - return sp;
3316 -}
3317 diff -urNp linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c
3318 --- linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3319 +++ linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3320 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3321 return 0;
3322 }
3323
3324 -static struct platform_suspend_ops hp6x0_pm_ops = {
3325 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3326 .enter = hp6x0_pm_enter,
3327 .valid = suspend_valid_only_mem,
3328 };
3329 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c
3330 --- linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3331 +++ linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3332 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3333 NULL,
3334 };
3335
3336 -static struct sysfs_ops sq_sysfs_ops = {
3337 +static const struct sysfs_ops sq_sysfs_ops = {
3338 .show = sq_sysfs_show,
3339 .store = sq_sysfs_store,
3340 };
3341 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c
3342 --- linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3343 +++ linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3344 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3345 return 0;
3346 }
3347
3348 -static struct platform_suspend_ops sh_pm_ops = {
3349 +static const struct platform_suspend_ops sh_pm_ops = {
3350 .enter = sh_pm_enter,
3351 .valid = suspend_valid_only_mem,
3352 };
3353 diff -urNp linux-2.6.32.41/arch/sh/kernel/kgdb.c linux-2.6.32.41/arch/sh/kernel/kgdb.c
3354 --- linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3355 +++ linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3356 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3357 {
3358 }
3359
3360 -struct kgdb_arch arch_kgdb_ops = {
3361 +const struct kgdb_arch arch_kgdb_ops = {
3362 /* Breakpoint instruction: trapa #0x3c */
3363 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3364 .gdb_bpt_instr = { 0x3c, 0xc3 },
3365 diff -urNp linux-2.6.32.41/arch/sh/mm/mmap.c linux-2.6.32.41/arch/sh/mm/mmap.c
3366 --- linux-2.6.32.41/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3367 +++ linux-2.6.32.41/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3368 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3369 addr = PAGE_ALIGN(addr);
3370
3371 vma = find_vma(mm, addr);
3372 - if (TASK_SIZE - len >= addr &&
3373 - (!vma || addr + len <= vma->vm_start))
3374 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3375 return addr;
3376 }
3377
3378 @@ -106,7 +105,7 @@ full_search:
3379 }
3380 return -ENOMEM;
3381 }
3382 - if (likely(!vma || addr + len <= vma->vm_start)) {
3383 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3384 /*
3385 * Remember the place where we stopped the search:
3386 */
3387 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3388 addr = PAGE_ALIGN(addr);
3389
3390 vma = find_vma(mm, addr);
3391 - if (TASK_SIZE - len >= addr &&
3392 - (!vma || addr + len <= vma->vm_start))
3393 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3394 return addr;
3395 }
3396
3397 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3398 /* make sure it can fit in the remaining address space */
3399 if (likely(addr > len)) {
3400 vma = find_vma(mm, addr-len);
3401 - if (!vma || addr <= vma->vm_start) {
3402 + if (check_heap_stack_gap(vma, addr - len, len)) {
3403 /* remember the address as a hint for next time */
3404 return (mm->free_area_cache = addr-len);
3405 }
3406 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3407 if (unlikely(mm->mmap_base < len))
3408 goto bottomup;
3409
3410 - addr = mm->mmap_base-len;
3411 - if (do_colour_align)
3412 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3413 + addr = mm->mmap_base - len;
3414
3415 do {
3416 + if (do_colour_align)
3417 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3418 /*
3419 * Lookup failure means no vma is above this address,
3420 * else if new region fits below vma->vm_start,
3421 * return with success:
3422 */
3423 vma = find_vma(mm, addr);
3424 - if (likely(!vma || addr+len <= vma->vm_start)) {
3425 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3426 /* remember the address as a hint for next time */
3427 return (mm->free_area_cache = addr);
3428 }
3429 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3430 mm->cached_hole_size = vma->vm_start - addr;
3431
3432 /* try just below the current vma->vm_start */
3433 - addr = vma->vm_start-len;
3434 - if (do_colour_align)
3435 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3436 - } while (likely(len < vma->vm_start));
3437 + addr = skip_heap_stack_gap(vma, len);
3438 + } while (!IS_ERR_VALUE(addr));
3439
3440 bottomup:
3441 /*
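The restructured top-down loops (here and in the powerpc slice code above) step to the next candidate with skip_heap_stack_gap() and stop once it returns an error value caught by IS_ERR_VALUE(), instead of relying on "len < vma->vm_start". That helper is defined elsewhere in the patch; the sketch below is an assumption, shown only to make the loop's termination condition concrete, and assumes <linux/mm.h>:

	static inline unsigned long skip_heap_stack_gap_sketch(const struct vm_area_struct *vma,
								unsigned long len)
	{
		if (vma->vm_start < len)
			return -ENOMEM;		/* no room left below this vma */
		return vma->vm_start - len;
	}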
3442 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h
3443 --- linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3444 +++ linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3445 @@ -14,18 +14,40 @@
3446 #define ATOMIC64_INIT(i) { (i) }
3447
3448 #define atomic_read(v) ((v)->counter)
3449 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3450 +{
3451 + return v->counter;
3452 +}
3453 #define atomic64_read(v) ((v)->counter)
3454 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3455 +{
3456 + return v->counter;
3457 +}
3458
3459 #define atomic_set(v, i) (((v)->counter) = i)
3460 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3461 +{
3462 + v->counter = i;
3463 +}
3464 #define atomic64_set(v, i) (((v)->counter) = i)
3465 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3466 +{
3467 + v->counter = i;
3468 +}
3469
3470 extern void atomic_add(int, atomic_t *);
3471 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3472 extern void atomic64_add(long, atomic64_t *);
3473 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3474 extern void atomic_sub(int, atomic_t *);
3475 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3476 extern void atomic64_sub(long, atomic64_t *);
3477 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3478
3479 extern int atomic_add_ret(int, atomic_t *);
3480 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3481 extern long atomic64_add_ret(long, atomic64_t *);
3482 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3483 extern int atomic_sub_ret(int, atomic_t *);
3484 extern long atomic64_sub_ret(long, atomic64_t *);
3485
3486 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3487 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3488
3489 #define atomic_inc_return(v) atomic_add_ret(1, v)
3490 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3491 +{
3492 + return atomic_add_ret_unchecked(1, v);
3493 +}
3494 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3495 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3496 +{
3497 + return atomic64_add_ret_unchecked(1, v);
3498 +}
3499
3500 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3501 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3502 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3503 * other cases.
3504 */
3505 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3506 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3507 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3508
3509 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3510 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3511 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3512
3513 #define atomic_inc(v) atomic_add(1, v)
3514 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3515 +{
3516 + atomic_add_unchecked(1, v);
3517 +}
3518 #define atomic64_inc(v) atomic64_add(1, v)
3519 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3520 +{
3521 + atomic64_add_unchecked(1, v);
3522 +}
3523
3524 #define atomic_dec(v) atomic_sub(1, v)
3525 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3526 +{
3527 + atomic_sub_unchecked(1, v);
3528 +}
3529 #define atomic64_dec(v) atomic64_sub(1, v)
3530 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3531 +{
3532 + atomic64_sub_unchecked(1, v);
3533 +}
3534
3535 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3536 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3537
3538 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3539 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3540 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3541 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3542
3543 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3544 {
3545 - int c, old;
3546 + int c, old, new;
3547 c = atomic_read(v);
3548 for (;;) {
3549 - if (unlikely(c == (u)))
3550 + if (unlikely(c == u))
3551 break;
3552 - old = atomic_cmpxchg((v), c, c + (a));
3553 +
3554 + asm volatile("addcc %2, %0, %0\n"
3555 +
3556 +#ifdef CONFIG_PAX_REFCOUNT
3557 + "tvs %%icc, 6\n"
3558 +#endif
3559 +
3560 + : "=r" (new)
3561 + : "0" (c), "ir" (a)
3562 + : "cc");
3563 +
3564 + old = atomic_cmpxchg(v, c, new);
3565 if (likely(old == c))
3566 break;
3567 c = old;
3568 }
3569 - return c != (u);
3570 + return c != u;
3571 }
3572
3573 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3574 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3575
3576 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3577 {
3578 - long c, old;
3579 + long c, old, new;
3580 c = atomic64_read(v);
3581 for (;;) {
3582 - if (unlikely(c == (u)))
3583 + if (unlikely(c == u))
3584 break;
3585 - old = atomic64_cmpxchg((v), c, c + (a));
3586 +
3587 + asm volatile("addcc %2, %0, %0\n"
3588 +
3589 +#ifdef CONFIG_PAX_REFCOUNT
3590 + "tvs %%xcc, 6\n"
3591 +#endif
3592 +
3593 + : "=r" (new)
3594 + : "0" (c), "ir" (a)
3595 + : "cc");
3596 +
3597 + old = atomic64_cmpxchg(v, c, new);
3598 if (likely(old == c))
3599 break;
3600 c = old;
3601 }
3602 - return c != (u);
3603 + return c != u;
3604 }
3605
3606 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
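The sparc64 atomics gain *_unchecked twins: the regular operations now use addcc/subcc followed by a trap-on-overflow instruction (tvs) so a reference count that overflows traps instead of silently wrapping, while the _unchecked variants keep the old wrapping behaviour for counters that may legitimately wrap (statistics and the like). A user-space analogy of the same policy, expressed with a GCC builtin rather than the hardware trap:

	#include <stdio.h>
	#include <stdlib.h>

	static int checked_add(int *counter, int a)
	{
		int new;

		if (__builtin_add_overflow(*counter, a, &new))
			abort();		/* refuse to wrap on signed overflow */
		*counter = new;
		return new;
	}

	int main(void)
	{
		int c = 0x7ffffffe;

		printf("%d\n", checked_add(&c, 1));	/* 2147483647 */
		checked_add(&c, 1);			/* aborts */
		return 0;
	}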
3607 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/cache.h linux-2.6.32.41/arch/sparc/include/asm/cache.h
3608 --- linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3609 +++ linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3610 @@ -8,7 +8,7 @@
3611 #define _SPARC_CACHE_H
3612
3613 #define L1_CACHE_SHIFT 5
3614 -#define L1_CACHE_BYTES 32
3615 +#define L1_CACHE_BYTES 32U
3616 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3617
3618 #ifdef CONFIG_SPARC32
3619 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h
3620 --- linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3621 +++ linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3622 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3623 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3624 #define dma_is_consistent(d, h) (1)
3625
3626 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3627 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3628 extern struct bus_type pci_bus_type;
3629
3630 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3631 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3632 {
3633 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3634 if (dev->bus == &pci_bus_type)
3635 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3636 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3637 dma_addr_t *dma_handle, gfp_t flag)
3638 {
3639 - struct dma_map_ops *ops = get_dma_ops(dev);
3640 + const struct dma_map_ops *ops = get_dma_ops(dev);
3641 void *cpu_addr;
3642
3643 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3644 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3645 static inline void dma_free_coherent(struct device *dev, size_t size,
3646 void *cpu_addr, dma_addr_t dma_handle)
3647 {
3648 - struct dma_map_ops *ops = get_dma_ops(dev);
3649 + const struct dma_map_ops *ops = get_dma_ops(dev);
3650
3651 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3652 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3653 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_32.h linux-2.6.32.41/arch/sparc/include/asm/elf_32.h
3654 --- linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3655 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3656 @@ -116,6 +116,13 @@ typedef struct {
3657
3658 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3659
3660 +#ifdef CONFIG_PAX_ASLR
3661 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3662 +
3663 +#define PAX_DELTA_MMAP_LEN 16
3664 +#define PAX_DELTA_STACK_LEN 16
3665 +#endif
3666 +
3667 /* This yields a mask that user programs can use to figure out what
3668 instruction set this cpu supports. This can NOT be done in userspace
3669 on Sparc. */
3670 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_64.h linux-2.6.32.41/arch/sparc/include/asm/elf_64.h
3671 --- linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3672 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3673 @@ -163,6 +163,12 @@ typedef struct {
3674 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3675 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3676
3677 +#ifdef CONFIG_PAX_ASLR
3678 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3679 +
3680 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3681 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3682 +#endif
3683
3684 /* This yields a mask that user programs can use to figure out what
3685 instruction set this cpu supports. */
3686 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h
3687 --- linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3688 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3689 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3690 BTFIXUPDEF_INT(page_none)
3691 BTFIXUPDEF_INT(page_copy)
3692 BTFIXUPDEF_INT(page_readonly)
3693 +
3694 +#ifdef CONFIG_PAX_PAGEEXEC
3695 +BTFIXUPDEF_INT(page_shared_noexec)
3696 +BTFIXUPDEF_INT(page_copy_noexec)
3697 +BTFIXUPDEF_INT(page_readonly_noexec)
3698 +#endif
3699 +
3700 BTFIXUPDEF_INT(page_kernel)
3701
3702 #define PMD_SHIFT SUN4C_PMD_SHIFT
3703 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3704 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3705 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3706
3707 +#ifdef CONFIG_PAX_PAGEEXEC
3708 +extern pgprot_t PAGE_SHARED_NOEXEC;
3709 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3710 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3711 +#else
3712 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3713 +# define PAGE_COPY_NOEXEC PAGE_COPY
3714 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3715 +#endif
3716 +
3717 extern unsigned long page_kernel;
3718
3719 #ifdef MODULE
3720 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h
3721 --- linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3722 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3723 @@ -115,6 +115,13 @@
3724 SRMMU_EXEC | SRMMU_REF)
3725 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3726 SRMMU_EXEC | SRMMU_REF)
3727 +
3728 +#ifdef CONFIG_PAX_PAGEEXEC
3729 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3730 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3731 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3732 +#endif
3733 +
3734 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3735 SRMMU_DIRTY | SRMMU_REF)
3736
3737 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h
3738 --- linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3739 +++ linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3740 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3741
3742 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3743
3744 -static void inline arch_read_lock(raw_rwlock_t *lock)
3745 +static inline void arch_read_lock(raw_rwlock_t *lock)
3746 {
3747 unsigned long tmp1, tmp2;
3748
3749 __asm__ __volatile__ (
3750 "1: ldsw [%2], %0\n"
3751 " brlz,pn %0, 2f\n"
3752 -"4: add %0, 1, %1\n"
3753 +"4: addcc %0, 1, %1\n"
3754 +
3755 +#ifdef CONFIG_PAX_REFCOUNT
3756 +" tvs %%icc, 6\n"
3757 +#endif
3758 +
3759 " cas [%2], %0, %1\n"
3760 " cmp %0, %1\n"
3761 " bne,pn %%icc, 1b\n"
3762 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3763 " .previous"
3764 : "=&r" (tmp1), "=&r" (tmp2)
3765 : "r" (lock)
3766 - : "memory");
3767 + : "memory", "cc");
3768 }
3769
3770 static int inline arch_read_trylock(raw_rwlock_t *lock)
3771 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3772 "1: ldsw [%2], %0\n"
3773 " brlz,a,pn %0, 2f\n"
3774 " mov 0, %0\n"
3775 -" add %0, 1, %1\n"
3776 +" addcc %0, 1, %1\n"
3777 +
3778 +#ifdef CONFIG_PAX_REFCOUNT
3779 +" tvs %%icc, 6\n"
3780 +#endif
3781 +
3782 " cas [%2], %0, %1\n"
3783 " cmp %0, %1\n"
3784 " bne,pn %%icc, 1b\n"
3785 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3786 return tmp1;
3787 }
3788
3789 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3790 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3791 {
3792 unsigned long tmp1, tmp2;
3793
3794 __asm__ __volatile__(
3795 "1: lduw [%2], %0\n"
3796 -" sub %0, 1, %1\n"
3797 +" subcc %0, 1, %1\n"
3798 +
3799 +#ifdef CONFIG_PAX_REFCOUNT
3800 +" tvs %%icc, 6\n"
3801 +#endif
3802 +
3803 " cas [%2], %0, %1\n"
3804 " cmp %0, %1\n"
3805 " bne,pn %%xcc, 1b\n"
3806 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3807 : "memory");
3808 }
3809
3810 -static void inline arch_write_lock(raw_rwlock_t *lock)
3811 +static inline void arch_write_lock(raw_rwlock_t *lock)
3812 {
3813 unsigned long mask, tmp1, tmp2;
3814
3815 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3816 : "memory");
3817 }
3818
3819 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3820 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3821 {
3822 __asm__ __volatile__(
3823 " stw %%g0, [%0]"
3824 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h
3825 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3826 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
3827 @@ -50,6 +50,8 @@ struct thread_info {
3828 unsigned long w_saved;
3829
3830 struct restart_block restart_block;
3831 +
3832 + unsigned long lowest_stack;
3833 };
3834
3835 /*
3836 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h
3837 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
3838 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
3839 @@ -68,6 +68,8 @@ struct thread_info {
3840 struct pt_regs *kern_una_regs;
3841 unsigned int kern_una_insn;
3842
3843 + unsigned long lowest_stack;
3844 +
3845 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3846 };
3847
3848 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h
3849 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3850 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3851 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3852
3853 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3854 {
3855 - if (n && __access_ok((unsigned long) to, n))
3856 + if ((long)n < 0)
3857 + return n;
3858 +
3859 + if (n && __access_ok((unsigned long) to, n)) {
3860 + if (!__builtin_constant_p(n))
3861 + check_object_size(from, n, true);
3862 return __copy_user(to, (__force void __user *) from, n);
3863 - else
3864 + } else
3865 return n;
3866 }
3867
3868 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3869 {
3870 + if ((long)n < 0)
3871 + return n;
3872 +
3873 + if (!__builtin_constant_p(n))
3874 + check_object_size(from, n, true);
3875 +
3876 return __copy_user(to, (__force void __user *) from, n);
3877 }
3878
3879 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3880 {
3881 - if (n && __access_ok((unsigned long) from, n))
3882 + if ((long)n < 0)
3883 + return n;
3884 +
3885 + if (n && __access_ok((unsigned long) from, n)) {
3886 + if (!__builtin_constant_p(n))
3887 + check_object_size(to, n, false);
3888 return __copy_user((__force void __user *) to, from, n);
3889 - else
3890 + } else
3891 return n;
3892 }
3893
3894 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3895 {
3896 + if ((long)n < 0)
3897 + return n;
3898 +
3899 return __copy_user((__force void __user *) to, from, n);
3900 }
3901
3902 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h
3903 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3904 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3905 @@ -9,6 +9,7 @@
3906 #include <linux/compiler.h>
3907 #include <linux/string.h>
3908 #include <linux/thread_info.h>
3909 +#include <linux/kernel.h>
3910 #include <asm/asi.h>
3911 #include <asm/system.h>
3912 #include <asm/spitfire.h>
3913 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3914 static inline unsigned long __must_check
3915 copy_from_user(void *to, const void __user *from, unsigned long size)
3916 {
3917 - unsigned long ret = ___copy_from_user(to, from, size);
3918 + unsigned long ret;
3919
3920 + if ((long)size < 0 || size > INT_MAX)
3921 + return size;
3922 +
3923 + if (!__builtin_constant_p(size))
3924 + check_object_size(to, size, false);
3925 +
3926 + ret = ___copy_from_user(to, from, size);
3927 if (unlikely(ret))
3928 ret = copy_from_user_fixup(to, from, size);
3929 return ret;
3930 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3931 static inline unsigned long __must_check
3932 copy_to_user(void __user *to, const void *from, unsigned long size)
3933 {
3934 - unsigned long ret = ___copy_to_user(to, from, size);
3935 + unsigned long ret;
3936 +
3937 + if ((long)size < 0 || size > INT_MAX)
3938 + return size;
3939 +
3940 + if (!__builtin_constant_p(size))
3941 + check_object_size(from, size, true);
3942
3943 + ret = ___copy_to_user(to, from, size);
3944 if (unlikely(ret))
3945 ret = copy_to_user_fixup(to, from, size);
3946 return ret;
3947 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess.h linux-2.6.32.41/arch/sparc/include/asm/uaccess.h
3948 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3949 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3950 @@ -1,5 +1,13 @@
3951 #ifndef ___ASM_SPARC_UACCESS_H
3952 #define ___ASM_SPARC_UACCESS_H
3953 +
3954 +#ifdef __KERNEL__
3955 +#ifndef __ASSEMBLY__
3956 +#include <linux/types.h>
3957 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3958 +#endif
3959 +#endif
3960 +
3961 #if defined(__sparc__) && defined(__arch64__)
3962 #include <asm/uaccess_64.h>
3963 #else
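Taken together, the sparc uaccess changes add two layers: a sign/size check on the copy length, and check_object_size(), declared here and implemented elsewhere in the patch, which is meant to verify that a copy with a non-constant size stays within the bounds of the kernel object it reads from or writes to. A user-space analogue of that bounds idea, with the object size passed explicitly:

	#include <stdio.h>
	#include <string.h>

	static int bounded_copy(void *dst, size_t dst_size, const void *src, size_t n)
	{
		if (n > dst_size)
			return -1;		/* copy would overrun the object */
		memcpy(dst, src, n);
		return 0;
	}

	int main(void)
	{
		char buf[16];

		printf("%d\n", bounded_copy(buf, sizeof(buf), "hello", 6));	/* 0  */
		printf("%d\n", bounded_copy(buf, sizeof(buf), "hello", 32));	/* -1 */
		return 0;
	}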
3964 diff -urNp linux-2.6.32.41/arch/sparc/kernel/iommu.c linux-2.6.32.41/arch/sparc/kernel/iommu.c
3965 --- linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
3966 +++ linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
3967 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3968 spin_unlock_irqrestore(&iommu->lock, flags);
3969 }
3970
3971 -static struct dma_map_ops sun4u_dma_ops = {
3972 +static const struct dma_map_ops sun4u_dma_ops = {
3973 .alloc_coherent = dma_4u_alloc_coherent,
3974 .free_coherent = dma_4u_free_coherent,
3975 .map_page = dma_4u_map_page,
3976 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
3977 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3978 };
3979
3980 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3981 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3982 EXPORT_SYMBOL(dma_ops);
3983
3984 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3985 diff -urNp linux-2.6.32.41/arch/sparc/kernel/ioport.c linux-2.6.32.41/arch/sparc/kernel/ioport.c
3986 --- linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
3987 +++ linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
3988 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
3989 BUG();
3990 }
3991
3992 -struct dma_map_ops sbus_dma_ops = {
3993 +const struct dma_map_ops sbus_dma_ops = {
3994 .alloc_coherent = sbus_alloc_coherent,
3995 .free_coherent = sbus_free_coherent,
3996 .map_page = sbus_map_page,
3997 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
3998 .sync_sg_for_device = sbus_sync_sg_for_device,
3999 };
4000
4001 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4002 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4003 EXPORT_SYMBOL(dma_ops);
4004
4005 static int __init sparc_register_ioport(void)
4006 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4007 }
4008 }
4009
4010 -struct dma_map_ops pci32_dma_ops = {
4011 +const struct dma_map_ops pci32_dma_ops = {
4012 .alloc_coherent = pci32_alloc_coherent,
4013 .free_coherent = pci32_free_coherent,
4014 .map_page = pci32_map_page,
4015 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c
4016 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4017 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4018 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4019 {
4020 }
4021
4022 -struct kgdb_arch arch_kgdb_ops = {
4023 +const struct kgdb_arch arch_kgdb_ops = {
4024 /* Breakpoint instruction: ta 0x7d */
4025 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4026 };
4027 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c
4028 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4029 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4030 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4031 {
4032 }
4033
4034 -struct kgdb_arch arch_kgdb_ops = {
4035 +const struct kgdb_arch arch_kgdb_ops = {
4036 /* Breakpoint instruction: ta 0x72 */
4037 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4038 };
4039 diff -urNp linux-2.6.32.41/arch/sparc/kernel/Makefile linux-2.6.32.41/arch/sparc/kernel/Makefile
4040 --- linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4041 +++ linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4042 @@ -3,7 +3,7 @@
4043 #
4044
4045 asflags-y := -ansi
4046 -ccflags-y := -Werror
4047 +#ccflags-y := -Werror
4048
4049 extra-y := head_$(BITS).o
4050 extra-y += init_task.o
4051 diff -urNp linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c
4052 --- linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4053 +++ linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4054 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4055 spin_unlock_irqrestore(&iommu->lock, flags);
4056 }
4057
4058 -static struct dma_map_ops sun4v_dma_ops = {
4059 +static const struct dma_map_ops sun4v_dma_ops = {
4060 .alloc_coherent = dma_4v_alloc_coherent,
4061 .free_coherent = dma_4v_free_coherent,
4062 .map_page = dma_4v_map_page,
4063 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_32.c linux-2.6.32.41/arch/sparc/kernel/process_32.c
4064 --- linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4065 +++ linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4066 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4067 rw->ins[4], rw->ins[5],
4068 rw->ins[6],
4069 rw->ins[7]);
4070 - printk("%pS\n", (void *) rw->ins[7]);
4071 + printk("%pA\n", (void *) rw->ins[7]);
4072 rw = (struct reg_window32 *) rw->ins[6];
4073 }
4074 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4075 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4076
4077 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4078 r->psr, r->pc, r->npc, r->y, print_tainted());
4079 - printk("PC: <%pS>\n", (void *) r->pc);
4080 + printk("PC: <%pA>\n", (void *) r->pc);
4081 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4082 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4083 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4084 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4085 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4086 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4087 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4088 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4089
4090 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4091 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4092 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4093 rw = (struct reg_window32 *) fp;
4094 pc = rw->ins[7];
4095 printk("[%08lx : ", pc);
4096 - printk("%pS ] ", (void *) pc);
4097 + printk("%pA ] ", (void *) pc);
4098 fp = rw->ins[6];
4099 } while (++count < 16);
4100 printk("\n");
4101 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_64.c linux-2.6.32.41/arch/sparc/kernel/process_64.c
4102 --- linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4103 +++ linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4104 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4105 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4106 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4107 if (regs->tstate & TSTATE_PRIV)
4108 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4109 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4110 }
4111
4112 void show_regs(struct pt_regs *regs)
4113 {
4114 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4115 regs->tpc, regs->tnpc, regs->y, print_tainted());
4116 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4117 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4118 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4119 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4120 regs->u_regs[3]);
4121 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4122 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4123 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4124 regs->u_regs[15]);
4125 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4126 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4127 show_regwindow(regs);
4128 }
4129
4130 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4131 ((tp && tp->task) ? tp->task->pid : -1));
4132
4133 if (gp->tstate & TSTATE_PRIV) {
4134 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4135 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4136 (void *) gp->tpc,
4137 (void *) gp->o7,
4138 (void *) gp->i7,
4139 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c
4140 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4141 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4142 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4143 if (ARCH_SUN4C && len > 0x20000000)
4144 return -ENOMEM;
4145 if (!addr)
4146 - addr = TASK_UNMAPPED_BASE;
4147 + addr = current->mm->mmap_base;
4148
4149 if (flags & MAP_SHARED)
4150 addr = COLOUR_ALIGN(addr);
4151 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4152 }
4153 if (TASK_SIZE - PAGE_SIZE - len < addr)
4154 return -ENOMEM;
4155 - if (!vmm || addr + len <= vmm->vm_start)
4156 + if (check_heap_stack_gap(vmm, addr, len))
4157 return addr;
4158 addr = vmm->vm_end;
4159 if (flags & MAP_SHARED)
4160 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c
4161 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4162 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4163 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4164 /* We do not accept a shared mapping if it would violate
4165 * cache aliasing constraints.
4166 */
4167 - if ((flags & MAP_SHARED) &&
4168 + if ((filp || (flags & MAP_SHARED)) &&
4169 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4170 return -EINVAL;
4171 return addr;
4172 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4173 if (filp || (flags & MAP_SHARED))
4174 do_color_align = 1;
4175
4176 +#ifdef CONFIG_PAX_RANDMMAP
4177 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4178 +#endif
4179 +
4180 if (addr) {
4181 if (do_color_align)
4182 addr = COLOUR_ALIGN(addr, pgoff);
4183 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4184 addr = PAGE_ALIGN(addr);
4185
4186 vma = find_vma(mm, addr);
4187 - if (task_size - len >= addr &&
4188 - (!vma || addr + len <= vma->vm_start))
4189 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4190 return addr;
4191 }
4192
4193 if (len > mm->cached_hole_size) {
4194 - start_addr = addr = mm->free_area_cache;
4195 + start_addr = addr = mm->free_area_cache;
4196 } else {
4197 - start_addr = addr = TASK_UNMAPPED_BASE;
4198 + start_addr = addr = mm->mmap_base;
4199 mm->cached_hole_size = 0;
4200 }
4201
4202 @@ -175,14 +178,14 @@ full_search:
4203 vma = find_vma(mm, VA_EXCLUDE_END);
4204 }
4205 if (unlikely(task_size < addr)) {
4206 - if (start_addr != TASK_UNMAPPED_BASE) {
4207 - start_addr = addr = TASK_UNMAPPED_BASE;
4208 + if (start_addr != mm->mmap_base) {
4209 + start_addr = addr = mm->mmap_base;
4210 mm->cached_hole_size = 0;
4211 goto full_search;
4212 }
4213 return -ENOMEM;
4214 }
4215 - if (likely(!vma || addr + len <= vma->vm_start)) {
4216 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4217 /*
4218 * Remember the place where we stopped the search:
4219 */
4220 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4221 /* We do not accept a shared mapping if it would violate
4222 * cache aliasing constraints.
4223 */
4224 - if ((flags & MAP_SHARED) &&
4225 + if ((filp || (flags & MAP_SHARED)) &&
4226 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4227 return -EINVAL;
4228 return addr;
4229 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4230 addr = PAGE_ALIGN(addr);
4231
4232 vma = find_vma(mm, addr);
4233 - if (task_size - len >= addr &&
4234 - (!vma || addr + len <= vma->vm_start))
4235 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4236 return addr;
4237 }
4238
4239 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4240 /* make sure it can fit in the remaining address space */
4241 if (likely(addr > len)) {
4242 vma = find_vma(mm, addr-len);
4243 - if (!vma || addr <= vma->vm_start) {
4244 + if (check_heap_stack_gap(vma, addr - len, len)) {
4245 /* remember the address as a hint for next time */
4246 return (mm->free_area_cache = addr-len);
4247 }
4248 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4249 if (unlikely(mm->mmap_base < len))
4250 goto bottomup;
4251
4252 - addr = mm->mmap_base-len;
4253 - if (do_color_align)
4254 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4255 + addr = mm->mmap_base - len;
4256
4257 do {
4258 + if (do_color_align)
4259 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4260 /*
4261 * Lookup failure means no vma is above this address,
4262 * else if new region fits below vma->vm_start,
4263 * return with success:
4264 */
4265 vma = find_vma(mm, addr);
4266 - if (likely(!vma || addr+len <= vma->vm_start)) {
4267 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4268 /* remember the address as a hint for next time */
4269 return (mm->free_area_cache = addr);
4270 }
4271 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4272 mm->cached_hole_size = vma->vm_start - addr;
4273
4274 /* try just below the current vma->vm_start */
4275 - addr = vma->vm_start-len;
4276 - if (do_color_align)
4277 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4278 - } while (likely(len < vma->vm_start));
4279 + addr = skip_heap_stack_gap(vma, len);
4280 + } while (!IS_ERR_VALUE(addr));
4281
4282 bottomup:
4283 /*
4284 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4285 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4286 sysctl_legacy_va_layout) {
4287 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4288 +
4289 +#ifdef CONFIG_PAX_RANDMMAP
4290 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4291 + mm->mmap_base += mm->delta_mmap;
4292 +#endif
4293 +
4294 mm->get_unmapped_area = arch_get_unmapped_area;
4295 mm->unmap_area = arch_unmap_area;
4296 } else {
4297 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4298 gap = (task_size / 6 * 5);
4299
4300 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4301 +
4302 +#ifdef CONFIG_PAX_RANDMMAP
4303 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4304 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4305 +#endif
4306 +
4307 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4308 mm->unmap_area = arch_unmap_area_topdown;
4309 }
4310 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_32.c linux-2.6.32.41/arch/sparc/kernel/traps_32.c
4311 --- linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4312 +++ linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-04-17 15:56:46.000000000 -0400
4313 @@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_
4314 count++ < 30 &&
4315 (((unsigned long) rw) >= PAGE_OFFSET) &&
4316 !(((unsigned long) rw) & 0x7)) {
4317 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4318 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4319 (void *) rw->ins[7]);
4320 rw = (struct reg_window32 *)rw->ins[6];
4321 }
4322 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_64.c linux-2.6.32.41/arch/sparc/kernel/traps_64.c
4323 --- linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4324 +++ linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-04-17 15:56:46.000000000 -0400
4325 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4326 i + 1,
4327 p->trapstack[i].tstate, p->trapstack[i].tpc,
4328 p->trapstack[i].tnpc, p->trapstack[i].tt);
4329 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4330 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4331 }
4332 }
4333
4334 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4335
4336 lvl -= 0x100;
4337 if (regs->tstate & TSTATE_PRIV) {
4338 +
4339 +#ifdef CONFIG_PAX_REFCOUNT
4340 + if (lvl == 6)
4341 + pax_report_refcount_overflow(regs);
4342 +#endif
4343 +
4344 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4345 die_if_kernel(buffer, regs);
4346 }
4347 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4348 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4349 {
4350 char buffer[32];
4351 -
4352 +
4353 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4354 0, lvl, SIGTRAP) == NOTIFY_STOP)
4355 return;
4356
4357 +#ifdef CONFIG_PAX_REFCOUNT
4358 + if (lvl == 6)
4359 + pax_report_refcount_overflow(regs);
4360 +#endif
4361 +
4362 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4363
4364 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4365 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4366 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4367 printk("%s" "ERROR(%d): ",
4368 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4369 - printk("TPC<%pS>\n", (void *) regs->tpc);
4370 + printk("TPC<%pA>\n", (void *) regs->tpc);
4371 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4372 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4373 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4374 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4375 smp_processor_id(),
4376 (type & 0x1) ? 'I' : 'D',
4377 regs->tpc);
4378 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4379 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4380 panic("Irrecoverable Cheetah+ parity error.");
4381 }
4382
4383 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4384 smp_processor_id(),
4385 (type & 0x1) ? 'I' : 'D',
4386 regs->tpc);
4387 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4388 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4389 }
4390
4391 struct sun4v_error_entry {
4392 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4393
4394 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4395 regs->tpc, tl);
4396 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4397 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4398 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4399 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4400 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4401 (void *) regs->u_regs[UREG_I7]);
4402 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4403 "pte[%lx] error[%lx]\n",
4404 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4405
4406 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4407 regs->tpc, tl);
4408 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4409 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4410 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4411 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4412 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4413 (void *) regs->u_regs[UREG_I7]);
4414 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4415 "pte[%lx] error[%lx]\n",
4416 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4417 fp = (unsigned long)sf->fp + STACK_BIAS;
4418 }
4419
4420 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4421 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4422 } while (++count < 16);
4423 }
4424
4425 @@ -2260,7 +2271,7 @@ void die_if_kernel(char *str, struct pt_
4426 while (rw &&
4427 count++ < 30&&
4428 is_kernel_stack(current, rw)) {
4429 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4430 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4431 (void *) rw->ins[7]);
4432
4433 rw = kernel_stack_up(rw);
4434 diff -urNp linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c
4435 --- linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4436 +++ linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4437 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4438 if (count < 5) {
4439 last_time = jiffies;
4440 count++;
4441 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4442 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4443 regs->tpc, (void *) regs->tpc);
4444 }
4445 }
4446 diff -urNp linux-2.6.32.41/arch/sparc/lib/atomic_64.S linux-2.6.32.41/arch/sparc/lib/atomic_64.S
4447 --- linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4448 +++ linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4449 @@ -18,7 +18,12 @@
4450 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4451 BACKOFF_SETUP(%o2)
4452 1: lduw [%o1], %g1
4453 - add %g1, %o0, %g7
4454 + addcc %g1, %o0, %g7
4455 +
4456 +#ifdef CONFIG_PAX_REFCOUNT
4457 + tvs %icc, 6
4458 +#endif
4459 +
4460 cas [%o1], %g1, %g7
4461 cmp %g1, %g7
4462 bne,pn %icc, 2f
4463 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4464 2: BACKOFF_SPIN(%o2, %o3, 1b)
4465 .size atomic_add, .-atomic_add
4466
4467 + .globl atomic_add_unchecked
4468 + .type atomic_add_unchecked,#function
4469 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4470 + BACKOFF_SETUP(%o2)
4471 +1: lduw [%o1], %g1
4472 + add %g1, %o0, %g7
4473 + cas [%o1], %g1, %g7
4474 + cmp %g1, %g7
4475 + bne,pn %icc, 2f
4476 + nop
4477 + retl
4478 + nop
4479 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4480 + .size atomic_add_unchecked, .-atomic_add_unchecked
4481 +
4482 .globl atomic_sub
4483 .type atomic_sub,#function
4484 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4485 BACKOFF_SETUP(%o2)
4486 1: lduw [%o1], %g1
4487 - sub %g1, %o0, %g7
4488 + subcc %g1, %o0, %g7
4489 +
4490 +#ifdef CONFIG_PAX_REFCOUNT
4491 + tvs %icc, 6
4492 +#endif
4493 +
4494 cas [%o1], %g1, %g7
4495 cmp %g1, %g7
4496 bne,pn %icc, 2f
4497 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4498 2: BACKOFF_SPIN(%o2, %o3, 1b)
4499 .size atomic_sub, .-atomic_sub
4500
4501 + .globl atomic_sub_unchecked
4502 + .type atomic_sub_unchecked,#function
4503 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4504 + BACKOFF_SETUP(%o2)
4505 +1: lduw [%o1], %g1
4506 + sub %g1, %o0, %g7
4507 + cas [%o1], %g1, %g7
4508 + cmp %g1, %g7
4509 + bne,pn %icc, 2f
4510 + nop
4511 + retl
4512 + nop
4513 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4514 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4515 +
4516 .globl atomic_add_ret
4517 .type atomic_add_ret,#function
4518 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4519 BACKOFF_SETUP(%o2)
4520 1: lduw [%o1], %g1
4521 - add %g1, %o0, %g7
4522 + addcc %g1, %o0, %g7
4523 +
4524 +#ifdef CONFIG_PAX_REFCOUNT
4525 + tvs %icc, 6
4526 +#endif
4527 +
4528 cas [%o1], %g1, %g7
4529 cmp %g1, %g7
4530 bne,pn %icc, 2f
4531 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4532 2: BACKOFF_SPIN(%o2, %o3, 1b)
4533 .size atomic_add_ret, .-atomic_add_ret
4534
4535 + .globl atomic_add_ret_unchecked
4536 + .type atomic_add_ret_unchecked,#function
4537 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4538 + BACKOFF_SETUP(%o2)
4539 +1: lduw [%o1], %g1
4540 + addcc %g1, %o0, %g7
4541 + cas [%o1], %g1, %g7
4542 + cmp %g1, %g7
4543 + bne,pn %icc, 2f
4544 + add %g7, %o0, %g7
4545 + sra %g7, 0, %o0
4546 + retl
4547 + nop
4548 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4549 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4550 +
4551 .globl atomic_sub_ret
4552 .type atomic_sub_ret,#function
4553 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4554 BACKOFF_SETUP(%o2)
4555 1: lduw [%o1], %g1
4556 - sub %g1, %o0, %g7
4557 + subcc %g1, %o0, %g7
4558 +
4559 +#ifdef CONFIG_PAX_REFCOUNT
4560 + tvs %icc, 6
4561 +#endif
4562 +
4563 cas [%o1], %g1, %g7
4564 cmp %g1, %g7
4565 bne,pn %icc, 2f
4566 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4567 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4568 BACKOFF_SETUP(%o2)
4569 1: ldx [%o1], %g1
4570 - add %g1, %o0, %g7
4571 + addcc %g1, %o0, %g7
4572 +
4573 +#ifdef CONFIG_PAX_REFCOUNT
4574 + tvs %xcc, 6
4575 +#endif
4576 +
4577 casx [%o1], %g1, %g7
4578 cmp %g1, %g7
4579 bne,pn %xcc, 2f
4580 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4581 2: BACKOFF_SPIN(%o2, %o3, 1b)
4582 .size atomic64_add, .-atomic64_add
4583
4584 + .globl atomic64_add_unchecked
4585 + .type atomic64_add_unchecked,#function
4586 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4587 + BACKOFF_SETUP(%o2)
4588 +1: ldx [%o1], %g1
4589 + addcc %g1, %o0, %g7
4590 + casx [%o1], %g1, %g7
4591 + cmp %g1, %g7
4592 + bne,pn %xcc, 2f
4593 + nop
4594 + retl
4595 + nop
4596 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4597 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4598 +
4599 .globl atomic64_sub
4600 .type atomic64_sub,#function
4601 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4602 BACKOFF_SETUP(%o2)
4603 1: ldx [%o1], %g1
4604 - sub %g1, %o0, %g7
4605 + subcc %g1, %o0, %g7
4606 +
4607 +#ifdef CONFIG_PAX_REFCOUNT
4608 + tvs %xcc, 6
4609 +#endif
4610 +
4611 casx [%o1], %g1, %g7
4612 cmp %g1, %g7
4613 bne,pn %xcc, 2f
4614 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4615 2: BACKOFF_SPIN(%o2, %o3, 1b)
4616 .size atomic64_sub, .-atomic64_sub
4617
4618 + .globl atomic64_sub_unchecked
4619 + .type atomic64_sub_unchecked,#function
4620 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4621 + BACKOFF_SETUP(%o2)
4622 +1: ldx [%o1], %g1
4623 + subcc %g1, %o0, %g7
4624 + casx [%o1], %g1, %g7
4625 + cmp %g1, %g7
4626 + bne,pn %xcc, 2f
4627 + nop
4628 + retl
4629 + nop
4630 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4631 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4632 +
4633 .globl atomic64_add_ret
4634 .type atomic64_add_ret,#function
4635 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4636 BACKOFF_SETUP(%o2)
4637 1: ldx [%o1], %g1
4638 - add %g1, %o0, %g7
4639 + addcc %g1, %o0, %g7
4640 +
4641 +#ifdef CONFIG_PAX_REFCOUNT
4642 + tvs %xcc, 6
4643 +#endif
4644 +
4645 casx [%o1], %g1, %g7
4646 cmp %g1, %g7
4647 bne,pn %xcc, 2f
4648 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4649 2: BACKOFF_SPIN(%o2, %o3, 1b)
4650 .size atomic64_add_ret, .-atomic64_add_ret
4651
4652 + .globl atomic64_add_ret_unchecked
4653 + .type atomic64_add_ret_unchecked,#function
4654 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4655 + BACKOFF_SETUP(%o2)
4656 +1: ldx [%o1], %g1
4657 + addcc %g1, %o0, %g7
4658 + casx [%o1], %g1, %g7
4659 + cmp %g1, %g7
4660 + bne,pn %xcc, 2f
4661 + add %g7, %o0, %g7
4662 + mov %g7, %o0
4663 + retl
4664 + nop
4665 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4666 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4667 +
4668 .globl atomic64_sub_ret
4669 .type atomic64_sub_ret,#function
4670 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4671 BACKOFF_SETUP(%o2)
4672 1: ldx [%o1], %g1
4673 - sub %g1, %o0, %g7
4674 + subcc %g1, %o0, %g7
4675 +
4676 +#ifdef CONFIG_PAX_REFCOUNT
4677 + tvs %xcc, 6
4678 +#endif
4679 +
4680 casx [%o1], %g1, %g7
4681 cmp %g1, %g7
4682 bne,pn %xcc, 2f
4683 diff -urNp linux-2.6.32.41/arch/sparc/lib/ksyms.c linux-2.6.32.41/arch/sparc/lib/ksyms.c
4684 --- linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4685 +++ linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4686 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4687
4688 /* Atomic counter implementation. */
4689 EXPORT_SYMBOL(atomic_add);
4690 +EXPORT_SYMBOL(atomic_add_unchecked);
4691 EXPORT_SYMBOL(atomic_add_ret);
4692 EXPORT_SYMBOL(atomic_sub);
4693 +EXPORT_SYMBOL(atomic_sub_unchecked);
4694 EXPORT_SYMBOL(atomic_sub_ret);
4695 EXPORT_SYMBOL(atomic64_add);
4696 +EXPORT_SYMBOL(atomic64_add_unchecked);
4697 EXPORT_SYMBOL(atomic64_add_ret);
4698 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4699 EXPORT_SYMBOL(atomic64_sub);
4700 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4701 EXPORT_SYMBOL(atomic64_sub_ret);
4702
4703 /* Atomic bit operations. */
4704 diff -urNp linux-2.6.32.41/arch/sparc/lib/Makefile linux-2.6.32.41/arch/sparc/lib/Makefile
4705 --- linux-2.6.32.41/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4706 +++ linux-2.6.32.41/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4707 @@ -2,7 +2,7 @@
4708 #
4709
4710 asflags-y := -ansi -DST_DIV0=0x02
4711 -ccflags-y := -Werror
4712 +#ccflags-y := -Werror
4713
4714 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4715 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4716 diff -urNp linux-2.6.32.41/arch/sparc/lib/rwsem_64.S linux-2.6.32.41/arch/sparc/lib/rwsem_64.S
4717 --- linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4718 +++ linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4719 @@ -11,7 +11,12 @@
4720 .globl __down_read
4721 __down_read:
4722 1: lduw [%o0], %g1
4723 - add %g1, 1, %g7
4724 + addcc %g1, 1, %g7
4725 +
4726 +#ifdef CONFIG_PAX_REFCOUNT
4727 + tvs %icc, 6
4728 +#endif
4729 +
4730 cas [%o0], %g1, %g7
4731 cmp %g1, %g7
4732 bne,pn %icc, 1b
4733 @@ -33,7 +38,12 @@ __down_read:
4734 .globl __down_read_trylock
4735 __down_read_trylock:
4736 1: lduw [%o0], %g1
4737 - add %g1, 1, %g7
4738 + addcc %g1, 1, %g7
4739 +
4740 +#ifdef CONFIG_PAX_REFCOUNT
4741 + tvs %icc, 6
4742 +#endif
4743 +
4744 cmp %g7, 0
4745 bl,pn %icc, 2f
4746 mov 0, %o1
4747 @@ -51,7 +61,12 @@ __down_write:
4748 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4749 1:
4750 lduw [%o0], %g3
4751 - add %g3, %g1, %g7
4752 + addcc %g3, %g1, %g7
4753 +
4754 +#ifdef CONFIG_PAX_REFCOUNT
4755 + tvs %icc, 6
4756 +#endif
4757 +
4758 cas [%o0], %g3, %g7
4759 cmp %g3, %g7
4760 bne,pn %icc, 1b
4761 @@ -77,7 +92,12 @@ __down_write_trylock:
4762 cmp %g3, 0
4763 bne,pn %icc, 2f
4764 mov 0, %o1
4765 - add %g3, %g1, %g7
4766 + addcc %g3, %g1, %g7
4767 +
4768 +#ifdef CONFIG_PAX_REFCOUNT
4769 + tvs %icc, 6
4770 +#endif
4771 +
4772 cas [%o0], %g3, %g7
4773 cmp %g3, %g7
4774 bne,pn %icc, 1b
4775 @@ -90,7 +110,12 @@ __down_write_trylock:
4776 __up_read:
4777 1:
4778 lduw [%o0], %g1
4779 - sub %g1, 1, %g7
4780 + subcc %g1, 1, %g7
4781 +
4782 +#ifdef CONFIG_PAX_REFCOUNT
4783 + tvs %icc, 6
4784 +#endif
4785 +
4786 cas [%o0], %g1, %g7
4787 cmp %g1, %g7
4788 bne,pn %icc, 1b
4789 @@ -118,7 +143,12 @@ __up_write:
4790 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4791 1:
4792 lduw [%o0], %g3
4793 - sub %g3, %g1, %g7
4794 + subcc %g3, %g1, %g7
4795 +
4796 +#ifdef CONFIG_PAX_REFCOUNT
4797 + tvs %icc, 6
4798 +#endif
4799 +
4800 cas [%o0], %g3, %g7
4801 cmp %g3, %g7
4802 bne,pn %icc, 1b
4803 @@ -143,7 +173,12 @@ __downgrade_write:
4804 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4805 1:
4806 lduw [%o0], %g3
4807 - sub %g3, %g1, %g7
4808 + subcc %g3, %g1, %g7
4809 +
4810 +#ifdef CONFIG_PAX_REFCOUNT
4811 + tvs %icc, 6
4812 +#endif
4813 +
4814 cas [%o0], %g3, %g7
4815 cmp %g3, %g7
4816 bne,pn %icc, 1b
4817 diff -urNp linux-2.6.32.41/arch/sparc/Makefile linux-2.6.32.41/arch/sparc/Makefile
4818 --- linux-2.6.32.41/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4819 +++ linux-2.6.32.41/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4820 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4821 # Export what is needed by arch/sparc/boot/Makefile
4822 export VMLINUX_INIT VMLINUX_MAIN
4823 VMLINUX_INIT := $(head-y) $(init-y)
4824 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4825 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4826 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4827 VMLINUX_MAIN += $(drivers-y) $(net-y)
4828
4829 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_32.c linux-2.6.32.41/arch/sparc/mm/fault_32.c
4830 --- linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4831 +++ linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4832 @@ -21,6 +21,9 @@
4833 #include <linux/interrupt.h>
4834 #include <linux/module.h>
4835 #include <linux/kdebug.h>
4836 +#include <linux/slab.h>
4837 +#include <linux/pagemap.h>
4838 +#include <linux/compiler.h>
4839
4840 #include <asm/system.h>
4841 #include <asm/page.h>
4842 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4843 return safe_compute_effective_address(regs, insn);
4844 }
4845
4846 +#ifdef CONFIG_PAX_PAGEEXEC
4847 +#ifdef CONFIG_PAX_DLRESOLVE
4848 +static void pax_emuplt_close(struct vm_area_struct *vma)
4849 +{
4850 + vma->vm_mm->call_dl_resolve = 0UL;
4851 +}
4852 +
4853 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4854 +{
4855 + unsigned int *kaddr;
4856 +
4857 + vmf->page = alloc_page(GFP_HIGHUSER);
4858 + if (!vmf->page)
4859 + return VM_FAULT_OOM;
4860 +
4861 + kaddr = kmap(vmf->page);
4862 + memset(kaddr, 0, PAGE_SIZE);
4863 + kaddr[0] = 0x9DE3BFA8U; /* save */
4864 + flush_dcache_page(vmf->page);
4865 + kunmap(vmf->page);
4866 + return VM_FAULT_MAJOR;
4867 +}
4868 +
4869 +static const struct vm_operations_struct pax_vm_ops = {
4870 + .close = pax_emuplt_close,
4871 + .fault = pax_emuplt_fault
4872 +};
4873 +
4874 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4875 +{
4876 + int ret;
4877 +
4878 + vma->vm_mm = current->mm;
4879 + vma->vm_start = addr;
4880 + vma->vm_end = addr + PAGE_SIZE;
4881 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4882 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4883 + vma->vm_ops = &pax_vm_ops;
4884 +
4885 + ret = insert_vm_struct(current->mm, vma);
4886 + if (ret)
4887 + return ret;
4888 +
4889 + ++current->mm->total_vm;
4890 + return 0;
4891 +}
4892 +#endif
4893 +
4894 +/*
4895 + * PaX: decide what to do with offenders (regs->pc = fault address)
4896 + *
4897 + * returns 1 when task should be killed
4898 + * 2 when patched PLT trampoline was detected
4899 + * 3 when unpatched PLT trampoline was detected
4900 + */
4901 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4902 +{
4903 +
4904 +#ifdef CONFIG_PAX_EMUPLT
4905 + int err;
4906 +
4907 + do { /* PaX: patched PLT emulation #1 */
4908 + unsigned int sethi1, sethi2, jmpl;
4909 +
4910 + err = get_user(sethi1, (unsigned int *)regs->pc);
4911 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4912 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4913 +
4914 + if (err)
4915 + break;
4916 +
4917 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4918 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4919 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4920 + {
4921 + unsigned int addr;
4922 +
4923 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4924 + addr = regs->u_regs[UREG_G1];
4925 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4926 + regs->pc = addr;
4927 + regs->npc = addr+4;
4928 + return 2;
4929 + }
4930 + } while (0);
4931 +
4932 + { /* PaX: patched PLT emulation #2 */
4933 + unsigned int ba;
4934 +
4935 + err = get_user(ba, (unsigned int *)regs->pc);
4936 +
4937 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4938 + unsigned int addr;
4939 +
4940 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4941 + regs->pc = addr;
4942 + regs->npc = addr+4;
4943 + return 2;
4944 + }
4945 + }
4946 +
4947 + do { /* PaX: patched PLT emulation #3 */
4948 + unsigned int sethi, jmpl, nop;
4949 +
4950 + err = get_user(sethi, (unsigned int *)regs->pc);
4951 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4952 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4953 +
4954 + if (err)
4955 + break;
4956 +
4957 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4958 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4959 + nop == 0x01000000U)
4960 + {
4961 + unsigned int addr;
4962 +
4963 + addr = (sethi & 0x003FFFFFU) << 10;
4964 + regs->u_regs[UREG_G1] = addr;
4965 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4966 + regs->pc = addr;
4967 + regs->npc = addr+4;
4968 + return 2;
4969 + }
4970 + } while (0);
4971 +
4972 + do { /* PaX: unpatched PLT emulation step 1 */
4973 + unsigned int sethi, ba, nop;
4974 +
4975 + err = get_user(sethi, (unsigned int *)regs->pc);
4976 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4977 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4978 +
4979 + if (err)
4980 + break;
4981 +
4982 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4983 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4984 + nop == 0x01000000U)
4985 + {
4986 + unsigned int addr, save, call;
4987 +
4988 + if ((ba & 0xFFC00000U) == 0x30800000U)
4989 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4990 + else
4991 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4992 +
4993 + err = get_user(save, (unsigned int *)addr);
4994 + err |= get_user(call, (unsigned int *)(addr+4));
4995 + err |= get_user(nop, (unsigned int *)(addr+8));
4996 + if (err)
4997 + break;
4998 +
4999 +#ifdef CONFIG_PAX_DLRESOLVE
5000 + if (save == 0x9DE3BFA8U &&
5001 + (call & 0xC0000000U) == 0x40000000U &&
5002 + nop == 0x01000000U)
5003 + {
5004 + struct vm_area_struct *vma;
5005 + unsigned long call_dl_resolve;
5006 +
5007 + down_read(&current->mm->mmap_sem);
5008 + call_dl_resolve = current->mm->call_dl_resolve;
5009 + up_read(&current->mm->mmap_sem);
5010 + if (likely(call_dl_resolve))
5011 + goto emulate;
5012 +
5013 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5014 +
5015 + down_write(&current->mm->mmap_sem);
5016 + if (current->mm->call_dl_resolve) {
5017 + call_dl_resolve = current->mm->call_dl_resolve;
5018 + up_write(&current->mm->mmap_sem);
5019 + if (vma)
5020 + kmem_cache_free(vm_area_cachep, vma);
5021 + goto emulate;
5022 + }
5023 +
5024 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5025 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5026 + up_write(&current->mm->mmap_sem);
5027 + if (vma)
5028 + kmem_cache_free(vm_area_cachep, vma);
5029 + return 1;
5030 + }
5031 +
5032 + if (pax_insert_vma(vma, call_dl_resolve)) {
5033 + up_write(&current->mm->mmap_sem);
5034 + kmem_cache_free(vm_area_cachep, vma);
5035 + return 1;
5036 + }
5037 +
5038 + current->mm->call_dl_resolve = call_dl_resolve;
5039 + up_write(&current->mm->mmap_sem);
5040 +
5041 +emulate:
5042 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5043 + regs->pc = call_dl_resolve;
5044 + regs->npc = addr+4;
5045 + return 3;
5046 + }
5047 +#endif
5048 +
5049 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5050 + if ((save & 0xFFC00000U) == 0x05000000U &&
5051 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5052 + nop == 0x01000000U)
5053 + {
5054 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5055 + regs->u_regs[UREG_G2] = addr + 4;
5056 + addr = (save & 0x003FFFFFU) << 10;
5057 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5058 + regs->pc = addr;
5059 + regs->npc = addr+4;
5060 + return 3;
5061 + }
5062 + }
5063 + } while (0);
5064 +
5065 + do { /* PaX: unpatched PLT emulation step 2 */
5066 + unsigned int save, call, nop;
5067 +
5068 + err = get_user(save, (unsigned int *)(regs->pc-4));
5069 + err |= get_user(call, (unsigned int *)regs->pc);
5070 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5071 + if (err)
5072 + break;
5073 +
5074 + if (save == 0x9DE3BFA8U &&
5075 + (call & 0xC0000000U) == 0x40000000U &&
5076 + nop == 0x01000000U)
5077 + {
5078 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5079 +
5080 + regs->u_regs[UREG_RETPC] = regs->pc;
5081 + regs->pc = dl_resolve;
5082 + regs->npc = dl_resolve+4;
5083 + return 3;
5084 + }
5085 + } while (0);
5086 +#endif
5087 +
5088 + return 1;
5089 +}
5090 +
5091 +void pax_report_insns(void *pc, void *sp)
5092 +{
5093 + unsigned long i;
5094 +
5095 + printk(KERN_ERR "PAX: bytes at PC: ");
5096 + for (i = 0; i < 8; i++) {
5097 + unsigned int c;
5098 + if (get_user(c, (unsigned int *)pc+i))
5099 + printk(KERN_CONT "???????? ");
5100 + else
5101 + printk(KERN_CONT "%08x ", c);
5102 + }
5103 + printk("\n");
5104 +}
5105 +#endif
5106 +
5107 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5108 unsigned long address)
5109 {
5110 @@ -231,6 +495,24 @@ good_area:
5111 if(!(vma->vm_flags & VM_WRITE))
5112 goto bad_area;
5113 } else {
5114 +
5115 +#ifdef CONFIG_PAX_PAGEEXEC
5116 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5117 + up_read(&mm->mmap_sem);
5118 + switch (pax_handle_fetch_fault(regs)) {
5119 +
5120 +#ifdef CONFIG_PAX_EMUPLT
5121 + case 2:
5122 + case 3:
5123 + return;
5124 +#endif
5125 +
5126 + }
5127 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5128 + do_group_exit(SIGKILL);
5129 + }
5130 +#endif
5131 +
5132 /* Allow reads even for write-only mappings */
5133 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5134 goto bad_area;
5135 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_64.c linux-2.6.32.41/arch/sparc/mm/fault_64.c
5136 --- linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5137 +++ linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5138 @@ -20,6 +20,9 @@
5139 #include <linux/kprobes.h>
5140 #include <linux/kdebug.h>
5141 #include <linux/percpu.h>
5142 +#include <linux/slab.h>
5143 +#include <linux/pagemap.h>
5144 +#include <linux/compiler.h>
5145
5146 #include <asm/page.h>
5147 #include <asm/pgtable.h>
5148 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5149 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5150 regs->tpc);
5151 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5152 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5153 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5154 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5155 dump_stack();
5156 unhandled_fault(regs->tpc, current, regs);
5157 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5158 show_regs(regs);
5159 }
5160
5161 +#ifdef CONFIG_PAX_PAGEEXEC
5162 +#ifdef CONFIG_PAX_DLRESOLVE
5163 +static void pax_emuplt_close(struct vm_area_struct *vma)
5164 +{
5165 + vma->vm_mm->call_dl_resolve = 0UL;
5166 +}
5167 +
5168 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5169 +{
5170 + unsigned int *kaddr;
5171 +
5172 + vmf->page = alloc_page(GFP_HIGHUSER);
5173 + if (!vmf->page)
5174 + return VM_FAULT_OOM;
5175 +
5176 + kaddr = kmap(vmf->page);
5177 + memset(kaddr, 0, PAGE_SIZE);
5178 + kaddr[0] = 0x9DE3BFA8U; /* save */
5179 + flush_dcache_page(vmf->page);
5180 + kunmap(vmf->page);
5181 + return VM_FAULT_MAJOR;
5182 +}
5183 +
5184 +static const struct vm_operations_struct pax_vm_ops = {
5185 + .close = pax_emuplt_close,
5186 + .fault = pax_emuplt_fault
5187 +};
5188 +
5189 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5190 +{
5191 + int ret;
5192 +
5193 + vma->vm_mm = current->mm;
5194 + vma->vm_start = addr;
5195 + vma->vm_end = addr + PAGE_SIZE;
5196 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5197 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5198 + vma->vm_ops = &pax_vm_ops;
5199 +
5200 + ret = insert_vm_struct(current->mm, vma);
5201 + if (ret)
5202 + return ret;
5203 +
5204 + ++current->mm->total_vm;
5205 + return 0;
5206 +}
5207 +#endif
5208 +
5209 +/*
5210 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5211 + *
5212 + * returns 1 when task should be killed
5213 + * 2 when patched PLT trampoline was detected
5214 + * 3 when unpatched PLT trampoline was detected
5215 + */
5216 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5217 +{
5218 +
5219 +#ifdef CONFIG_PAX_EMUPLT
5220 + int err;
5221 +
5222 + do { /* PaX: patched PLT emulation #1 */
5223 + unsigned int sethi1, sethi2, jmpl;
5224 +
5225 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5226 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5227 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5228 +
5229 + if (err)
5230 + break;
5231 +
5232 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5233 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5234 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5235 + {
5236 + unsigned long addr;
5237 +
5238 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5239 + addr = regs->u_regs[UREG_G1];
5240 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5241 +
5242 + if (test_thread_flag(TIF_32BIT))
5243 + addr &= 0xFFFFFFFFUL;
5244 +
5245 + regs->tpc = addr;
5246 + regs->tnpc = addr+4;
5247 + return 2;
5248 + }
5249 + } while (0);
5250 +
5251 + { /* PaX: patched PLT emulation #2 */
5252 + unsigned int ba;
5253 +
5254 + err = get_user(ba, (unsigned int *)regs->tpc);
5255 +
5256 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5257 + unsigned long addr;
5258 +
5259 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5260 +
5261 + if (test_thread_flag(TIF_32BIT))
5262 + addr &= 0xFFFFFFFFUL;
5263 +
5264 + regs->tpc = addr;
5265 + regs->tnpc = addr+4;
5266 + return 2;
5267 + }
5268 + }
5269 +
5270 + do { /* PaX: patched PLT emulation #3 */
5271 + unsigned int sethi, jmpl, nop;
5272 +
5273 + err = get_user(sethi, (unsigned int *)regs->tpc);
5274 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5275 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5276 +
5277 + if (err)
5278 + break;
5279 +
5280 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5281 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5282 + nop == 0x01000000U)
5283 + {
5284 + unsigned long addr;
5285 +
5286 + addr = (sethi & 0x003FFFFFU) << 10;
5287 + regs->u_regs[UREG_G1] = addr;
5288 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5289 +
5290 + if (test_thread_flag(TIF_32BIT))
5291 + addr &= 0xFFFFFFFFUL;
5292 +
5293 + regs->tpc = addr;
5294 + regs->tnpc = addr+4;
5295 + return 2;
5296 + }
5297 + } while (0);
5298 +
5299 + do { /* PaX: patched PLT emulation #4 */
5300 + unsigned int sethi, mov1, call, mov2;
5301 +
5302 + err = get_user(sethi, (unsigned int *)regs->tpc);
5303 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5304 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5305 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5306 +
5307 + if (err)
5308 + break;
5309 +
5310 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5311 + mov1 == 0x8210000FU &&
5312 + (call & 0xC0000000U) == 0x40000000U &&
5313 + mov2 == 0x9E100001U)
5314 + {
5315 + unsigned long addr;
5316 +
5317 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5318 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5319 +
5320 + if (test_thread_flag(TIF_32BIT))
5321 + addr &= 0xFFFFFFFFUL;
5322 +
5323 + regs->tpc = addr;
5324 + regs->tnpc = addr+4;
5325 + return 2;
5326 + }
5327 + } while (0);
5328 +
5329 + do { /* PaX: patched PLT emulation #5 */
5330 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5331 +
5332 + err = get_user(sethi, (unsigned int *)regs->tpc);
5333 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5334 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5335 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5336 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5337 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5338 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5339 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5340 +
5341 + if (err)
5342 + break;
5343 +
5344 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5345 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5346 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5347 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5348 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5349 + sllx == 0x83287020U &&
5350 + jmpl == 0x81C04005U &&
5351 + nop == 0x01000000U)
5352 + {
5353 + unsigned long addr;
5354 +
5355 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5356 + regs->u_regs[UREG_G1] <<= 32;
5357 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5358 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5359 + regs->tpc = addr;
5360 + regs->tnpc = addr+4;
5361 + return 2;
5362 + }
5363 + } while (0);
5364 +
5365 + do { /* PaX: patched PLT emulation #6 */
5366 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5367 +
5368 + err = get_user(sethi, (unsigned int *)regs->tpc);
5369 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5370 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5371 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5372 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5373 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5374 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5375 +
5376 + if (err)
5377 + break;
5378 +
5379 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5380 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5381 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5382 + sllx == 0x83287020U &&
5383 + (or & 0xFFFFE000U) == 0x8A116000U &&
5384 + jmpl == 0x81C04005U &&
5385 + nop == 0x01000000U)
5386 + {
5387 + unsigned long addr;
5388 +
5389 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5390 + regs->u_regs[UREG_G1] <<= 32;
5391 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5392 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5393 + regs->tpc = addr;
5394 + regs->tnpc = addr+4;
5395 + return 2;
5396 + }
5397 + } while (0);
5398 +
5399 + do { /* PaX: unpatched PLT emulation step 1 */
5400 + unsigned int sethi, ba, nop;
5401 +
5402 + err = get_user(sethi, (unsigned int *)regs->tpc);
5403 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5404 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5405 +
5406 + if (err)
5407 + break;
5408 +
5409 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5410 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5411 + nop == 0x01000000U)
5412 + {
5413 + unsigned long addr;
5414 + unsigned int save, call;
5415 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5416 +
5417 + if ((ba & 0xFFC00000U) == 0x30800000U)
5418 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5419 + else
5420 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5421 +
5422 + if (test_thread_flag(TIF_32BIT))
5423 + addr &= 0xFFFFFFFFUL;
5424 +
5425 + err = get_user(save, (unsigned int *)addr);
5426 + err |= get_user(call, (unsigned int *)(addr+4));
5427 + err |= get_user(nop, (unsigned int *)(addr+8));
5428 + if (err)
5429 + break;
5430 +
5431 +#ifdef CONFIG_PAX_DLRESOLVE
5432 + if (save == 0x9DE3BFA8U &&
5433 + (call & 0xC0000000U) == 0x40000000U &&
5434 + nop == 0x01000000U)
5435 + {
5436 + struct vm_area_struct *vma;
5437 + unsigned long call_dl_resolve;
5438 +
5439 + down_read(&current->mm->mmap_sem);
5440 + call_dl_resolve = current->mm->call_dl_resolve;
5441 + up_read(&current->mm->mmap_sem);
5442 + if (likely(call_dl_resolve))
5443 + goto emulate;
5444 +
5445 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5446 +
5447 + down_write(&current->mm->mmap_sem);
5448 + if (current->mm->call_dl_resolve) {
5449 + call_dl_resolve = current->mm->call_dl_resolve;
5450 + up_write(&current->mm->mmap_sem);
5451 + if (vma)
5452 + kmem_cache_free(vm_area_cachep, vma);
5453 + goto emulate;
5454 + }
5455 +
5456 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5457 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5458 + up_write(&current->mm->mmap_sem);
5459 + if (vma)
5460 + kmem_cache_free(vm_area_cachep, vma);
5461 + return 1;
5462 + }
5463 +
5464 + if (pax_insert_vma(vma, call_dl_resolve)) {
5465 + up_write(&current->mm->mmap_sem);
5466 + kmem_cache_free(vm_area_cachep, vma);
5467 + return 1;
5468 + }
5469 +
5470 + current->mm->call_dl_resolve = call_dl_resolve;
5471 + up_write(&current->mm->mmap_sem);
5472 +
5473 +emulate:
5474 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5475 + regs->tpc = call_dl_resolve;
5476 + regs->tnpc = addr+4;
5477 + return 3;
5478 + }
5479 +#endif
5480 +
5481 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5482 + if ((save & 0xFFC00000U) == 0x05000000U &&
5483 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5484 + nop == 0x01000000U)
5485 + {
5486 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5487 + regs->u_regs[UREG_G2] = addr + 4;
5488 + addr = (save & 0x003FFFFFU) << 10;
5489 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5490 +
5491 + if (test_thread_flag(TIF_32BIT))
5492 + addr &= 0xFFFFFFFFUL;
5493 +
5494 + regs->tpc = addr;
5495 + regs->tnpc = addr+4;
5496 + return 3;
5497 + }
5498 +
5499 + /* PaX: 64-bit PLT stub */
5500 + err = get_user(sethi1, (unsigned int *)addr);
5501 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5502 + err |= get_user(or1, (unsigned int *)(addr+8));
5503 + err |= get_user(or2, (unsigned int *)(addr+12));
5504 + err |= get_user(sllx, (unsigned int *)(addr+16));
5505 + err |= get_user(add, (unsigned int *)(addr+20));
5506 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5507 + err |= get_user(nop, (unsigned int *)(addr+28));
5508 + if (err)
5509 + break;
5510 +
5511 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5512 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5513 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5514 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5515 + sllx == 0x89293020U &&
5516 + add == 0x8A010005U &&
5517 + jmpl == 0x89C14000U &&
5518 + nop == 0x01000000U)
5519 + {
5520 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5521 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5522 + regs->u_regs[UREG_G4] <<= 32;
5523 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5524 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5525 + regs->u_regs[UREG_G4] = addr + 24;
5526 + addr = regs->u_regs[UREG_G5];
5527 + regs->tpc = addr;
5528 + regs->tnpc = addr+4;
5529 + return 3;
5530 + }
5531 + }
5532 + } while (0);
5533 +
5534 +#ifdef CONFIG_PAX_DLRESOLVE
5535 + do { /* PaX: unpatched PLT emulation step 2 */
5536 + unsigned int save, call, nop;
5537 +
5538 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5539 + err |= get_user(call, (unsigned int *)regs->tpc);
5540 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5541 + if (err)
5542 + break;
5543 +
5544 + if (save == 0x9DE3BFA8U &&
5545 + (call & 0xC0000000U) == 0x40000000U &&
5546 + nop == 0x01000000U)
5547 + {
5548 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5549 +
5550 + if (test_thread_flag(TIF_32BIT))
5551 + dl_resolve &= 0xFFFFFFFFUL;
5552 +
5553 + regs->u_regs[UREG_RETPC] = regs->tpc;
5554 + regs->tpc = dl_resolve;
5555 + regs->tnpc = dl_resolve+4;
5556 + return 3;
5557 + }
5558 + } while (0);
5559 +#endif
5560 +
5561 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5562 + unsigned int sethi, ba, nop;
5563 +
5564 + err = get_user(sethi, (unsigned int *)regs->tpc);
5565 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5566 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5567 +
5568 + if (err)
5569 + break;
5570 +
5571 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5572 + (ba & 0xFFF00000U) == 0x30600000U &&
5573 + nop == 0x01000000U)
5574 + {
5575 + unsigned long addr;
5576 +
5577 + addr = (sethi & 0x003FFFFFU) << 10;
5578 + regs->u_regs[UREG_G1] = addr;
5579 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5580 +
5581 + if (test_thread_flag(TIF_32BIT))
5582 + addr &= 0xFFFFFFFFUL;
5583 +
5584 + regs->tpc = addr;
5585 + regs->tnpc = addr+4;
5586 + return 2;
5587 + }
5588 + } while (0);
5589 +
5590 +#endif
5591 +
5592 + return 1;
5593 +}
5594 +
5595 +void pax_report_insns(void *pc, void *sp)
5596 +{
5597 + unsigned long i;
5598 +
5599 + printk(KERN_ERR "PAX: bytes at PC: ");
5600 + for (i = 0; i < 8; i++) {
5601 + unsigned int c;
5602 + if (get_user(c, (unsigned int *)pc+i))
5603 + printk(KERN_CONT "???????? ");
5604 + else
5605 + printk(KERN_CONT "%08x ", c);
5606 + }
5607 + printk("\n");
5608 +}
5609 +#endif
5610 +
5611 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5612 {
5613 struct mm_struct *mm = current->mm;
5614 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5615 if (!vma)
5616 goto bad_area;
5617
5618 +#ifdef CONFIG_PAX_PAGEEXEC
5619 + /* PaX: detect ITLB misses on non-exec pages */
5620 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5621 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5622 + {
5623 + if (address != regs->tpc)
5624 + goto good_area;
5625 +
5626 + up_read(&mm->mmap_sem);
5627 + switch (pax_handle_fetch_fault(regs)) {
5628 +
5629 +#ifdef CONFIG_PAX_EMUPLT
5630 + case 2:
5631 + case 3:
5632 + return;
5633 +#endif
5634 +
5635 + }
5636 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5637 + do_group_exit(SIGKILL);
5638 + }
5639 +#endif
5640 +
5641 /* Pure DTLB misses do not tell us whether the fault causing
5642 * load/store/atomic was a write or not, it only says that there
5643 * was no match. So in such a case we (carefully) read the
5644 diff -urNp linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c
5645 --- linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5646 +++ linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5647 @@ -69,7 +69,7 @@ full_search:
5648 }
5649 return -ENOMEM;
5650 }
5651 - if (likely(!vma || addr + len <= vma->vm_start)) {
5652 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5653 /*
5654 * Remember the place where we stopped the search:
5655 */
5656 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5657 /* make sure it can fit in the remaining address space */
5658 if (likely(addr > len)) {
5659 vma = find_vma(mm, addr-len);
5660 - if (!vma || addr <= vma->vm_start) {
5661 + if (check_heap_stack_gap(vma, addr - len, len)) {
5662 /* remember the address as a hint for next time */
5663 return (mm->free_area_cache = addr-len);
5664 }
5665 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5666 if (unlikely(mm->mmap_base < len))
5667 goto bottomup;
5668
5669 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5670 + addr = mm->mmap_base - len;
5671
5672 do {
5673 + addr &= HPAGE_MASK;
5674 /*
5675 * Lookup failure means no vma is above this address,
5676 * else if new region fits below vma->vm_start,
5677 * return with success:
5678 */
5679 vma = find_vma(mm, addr);
5680 - if (likely(!vma || addr+len <= vma->vm_start)) {
5681 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5682 /* remember the address as a hint for next time */
5683 return (mm->free_area_cache = addr);
5684 }
5685 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5686 mm->cached_hole_size = vma->vm_start - addr;
5687
5688 /* try just below the current vma->vm_start */
5689 - addr = (vma->vm_start-len) & HPAGE_MASK;
5690 - } while (likely(len < vma->vm_start));
5691 + addr = skip_heap_stack_gap(vma, len);
5692 + } while (!IS_ERR_VALUE(addr));
5693
5694 bottomup:
5695 /*
5696 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5697 if (addr) {
5698 addr = ALIGN(addr, HPAGE_SIZE);
5699 vma = find_vma(mm, addr);
5700 - if (task_size - len >= addr &&
5701 - (!vma || addr + len <= vma->vm_start))
5702 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5703 return addr;
5704 }
5705 if (mm->get_unmapped_area == arch_get_unmapped_area)
5706 diff -urNp linux-2.6.32.41/arch/sparc/mm/init_32.c linux-2.6.32.41/arch/sparc/mm/init_32.c
5707 --- linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5708 +++ linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5709 @@ -317,6 +317,9 @@ extern void device_scan(void);
5710 pgprot_t PAGE_SHARED __read_mostly;
5711 EXPORT_SYMBOL(PAGE_SHARED);
5712
5713 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5714 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5715 +
5716 void __init paging_init(void)
5717 {
5718 switch(sparc_cpu_model) {
5719 @@ -345,17 +348,17 @@ void __init paging_init(void)
5720
5721 /* Initialize the protection map with non-constant, MMU dependent values. */
5722 protection_map[0] = PAGE_NONE;
5723 - protection_map[1] = PAGE_READONLY;
5724 - protection_map[2] = PAGE_COPY;
5725 - protection_map[3] = PAGE_COPY;
5726 + protection_map[1] = PAGE_READONLY_NOEXEC;
5727 + protection_map[2] = PAGE_COPY_NOEXEC;
5728 + protection_map[3] = PAGE_COPY_NOEXEC;
5729 protection_map[4] = PAGE_READONLY;
5730 protection_map[5] = PAGE_READONLY;
5731 protection_map[6] = PAGE_COPY;
5732 protection_map[7] = PAGE_COPY;
5733 protection_map[8] = PAGE_NONE;
5734 - protection_map[9] = PAGE_READONLY;
5735 - protection_map[10] = PAGE_SHARED;
5736 - protection_map[11] = PAGE_SHARED;
5737 + protection_map[9] = PAGE_READONLY_NOEXEC;
5738 + protection_map[10] = PAGE_SHARED_NOEXEC;
5739 + protection_map[11] = PAGE_SHARED_NOEXEC;
5740 protection_map[12] = PAGE_READONLY;
5741 protection_map[13] = PAGE_READONLY;
5742 protection_map[14] = PAGE_SHARED;
5743 diff -urNp linux-2.6.32.41/arch/sparc/mm/Makefile linux-2.6.32.41/arch/sparc/mm/Makefile
5744 --- linux-2.6.32.41/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5745 +++ linux-2.6.32.41/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5746 @@ -2,7 +2,7 @@
5747 #
5748
5749 asflags-y := -ansi
5750 -ccflags-y := -Werror
5751 +#ccflags-y := -Werror
5752
5753 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5754 obj-y += fault_$(BITS).o
5755 diff -urNp linux-2.6.32.41/arch/sparc/mm/srmmu.c linux-2.6.32.41/arch/sparc/mm/srmmu.c
5756 --- linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5757 +++ linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5758 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5759 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5760 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5761 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5762 +
5763 +#ifdef CONFIG_PAX_PAGEEXEC
5764 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5765 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5766 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5767 +#endif
5768 +
5769 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5770 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5771
5772 diff -urNp linux-2.6.32.41/arch/um/include/asm/kmap_types.h linux-2.6.32.41/arch/um/include/asm/kmap_types.h
5773 --- linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5774 +++ linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5775 @@ -23,6 +23,7 @@ enum km_type {
5776 KM_IRQ1,
5777 KM_SOFTIRQ0,
5778 KM_SOFTIRQ1,
5779 + KM_CLEARPAGE,
5780 KM_TYPE_NR
5781 };
5782
5783 diff -urNp linux-2.6.32.41/arch/um/include/asm/page.h linux-2.6.32.41/arch/um/include/asm/page.h
5784 --- linux-2.6.32.41/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5785 +++ linux-2.6.32.41/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5786 @@ -14,6 +14,9 @@
5787 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5788 #define PAGE_MASK (~(PAGE_SIZE-1))
5789
5790 +#define ktla_ktva(addr) (addr)
5791 +#define ktva_ktla(addr) (addr)
5792 +
5793 #ifndef __ASSEMBLY__
5794
5795 struct page;
5796 diff -urNp linux-2.6.32.41/arch/um/kernel/process.c linux-2.6.32.41/arch/um/kernel/process.c
5797 --- linux-2.6.32.41/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5798 +++ linux-2.6.32.41/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5799 @@ -393,22 +393,6 @@ int singlestepping(void * t)
5800 return 2;
5801 }
5802
5803 -/*
5804 - * Only x86 and x86_64 have an arch_align_stack().
5805 - * All other arches have "#define arch_align_stack(x) (x)"
5806 - * in their asm/system.h
5807 - * As this is included in UML from asm-um/system-generic.h,
5808 - * we can use it to behave as the subarch does.
5809 - */
5810 -#ifndef arch_align_stack
5811 -unsigned long arch_align_stack(unsigned long sp)
5812 -{
5813 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5814 - sp -= get_random_int() % 8192;
5815 - return sp & ~0xf;
5816 -}
5817 -#endif
5818 -
5819 unsigned long get_wchan(struct task_struct *p)
5820 {
5821 unsigned long stack_page, sp, ip;
5822 diff -urNp linux-2.6.32.41/arch/um/sys-i386/syscalls.c linux-2.6.32.41/arch/um/sys-i386/syscalls.c
5823 --- linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5824 +++ linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5825 @@ -11,6 +11,21 @@
5826 #include "asm/uaccess.h"
5827 #include "asm/unistd.h"
5828
5829 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5830 +{
5831 + unsigned long pax_task_size = TASK_SIZE;
5832 +
5833 +#ifdef CONFIG_PAX_SEGMEXEC
5834 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5835 + pax_task_size = SEGMEXEC_TASK_SIZE;
5836 +#endif
5837 +
5838 + if (len > pax_task_size || addr > pax_task_size - len)
5839 + return -EINVAL;
5840 +
5841 + return 0;
5842 +}
5843 +
5844 /*
5845 * Perform the select(nd, in, out, ex, tv) and mmap() system
5846 * calls. Linux/i386 didn't use to be able to handle more than
5847 diff -urNp linux-2.6.32.41/arch/x86/boot/bitops.h linux-2.6.32.41/arch/x86/boot/bitops.h
5848 --- linux-2.6.32.41/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5849 +++ linux-2.6.32.41/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5850 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5851 u8 v;
5852 const u32 *p = (const u32 *)addr;
5853
5854 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5855 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5856 return v;
5857 }
5858
5859 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5860
5861 static inline void set_bit(int nr, void *addr)
5862 {
5863 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5864 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5865 }
5866
5867 #endif /* BOOT_BITOPS_H */
5868 diff -urNp linux-2.6.32.41/arch/x86/boot/boot.h linux-2.6.32.41/arch/x86/boot/boot.h
5869 --- linux-2.6.32.41/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5870 +++ linux-2.6.32.41/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5871 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5872 static inline u16 ds(void)
5873 {
5874 u16 seg;
5875 - asm("movw %%ds,%0" : "=rm" (seg));
5876 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5877 return seg;
5878 }
5879
5880 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5881 static inline int memcmp(const void *s1, const void *s2, size_t len)
5882 {
5883 u8 diff;
5884 - asm("repe; cmpsb; setnz %0"
5885 + asm volatile("repe; cmpsb; setnz %0"
5886 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5887 return diff;
5888 }
5889 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_32.S linux-2.6.32.41/arch/x86/boot/compressed/head_32.S
5890 --- linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5891 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5892 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5893 notl %eax
5894 andl %eax, %ebx
5895 #else
5896 - movl $LOAD_PHYSICAL_ADDR, %ebx
5897 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5898 #endif
5899
5900 /* Target address to relocate to for decompression */
5901 @@ -149,7 +149,7 @@ relocated:
5902 * and where it was actually loaded.
5903 */
5904 movl %ebp, %ebx
5905 - subl $LOAD_PHYSICAL_ADDR, %ebx
5906 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5907 jz 2f /* Nothing to be done if loaded at compiled addr. */
5908 /*
5909 * Process relocations.
5910 @@ -157,8 +157,7 @@ relocated:
5911
5912 1: subl $4, %edi
5913 movl (%edi), %ecx
5914 - testl %ecx, %ecx
5915 - jz 2f
5916 + jecxz 2f
5917 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5918 jmp 1b
5919 2:
5920 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_64.S linux-2.6.32.41/arch/x86/boot/compressed/head_64.S
5921 --- linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
5922 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
5923 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5924 notl %eax
5925 andl %eax, %ebx
5926 #else
5927 - movl $LOAD_PHYSICAL_ADDR, %ebx
5928 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5929 #endif
5930
5931 /* Target address to relocate to for decompression */
5932 @@ -234,7 +234,7 @@ ENTRY(startup_64)
5933 notq %rax
5934 andq %rax, %rbp
5935 #else
5936 - movq $LOAD_PHYSICAL_ADDR, %rbp
5937 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5938 #endif
5939
5940 /* Target address to relocate to for decompression */
5941 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/misc.c linux-2.6.32.41/arch/x86/boot/compressed/misc.c
5942 --- linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
5943 +++ linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
5944 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
5945 case PT_LOAD:
5946 #ifdef CONFIG_RELOCATABLE
5947 dest = output;
5948 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5949 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5950 #else
5951 dest = (void *)(phdr->p_paddr);
5952 #endif
5953 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
5954 error("Destination address too large");
5955 #endif
5956 #ifndef CONFIG_RELOCATABLE
5957 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5958 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5959 error("Wrong destination address");
5960 #endif
5961
5962 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c
5963 --- linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
5964 +++ linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
5965 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5966
5967 offs = (olen > ilen) ? olen - ilen : 0;
5968 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5969 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5970 + offs += 64*1024; /* Add 64K bytes slack */
5971 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5972
5973 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
5974 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/relocs.c linux-2.6.32.41/arch/x86/boot/compressed/relocs.c
5975 --- linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
5976 +++ linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
5977 @@ -10,8 +10,11 @@
5978 #define USE_BSD
5979 #include <endian.h>
5980
5981 +#include "../../../../include/linux/autoconf.h"
5982 +
5983 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5984 static Elf32_Ehdr ehdr;
5985 +static Elf32_Phdr *phdr;
5986 static unsigned long reloc_count, reloc_idx;
5987 static unsigned long *relocs;
5988
5989 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
5990
5991 static int is_safe_abs_reloc(const char* sym_name)
5992 {
5993 - int i;
5994 + unsigned int i;
5995
5996 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
5997 if (!strcmp(sym_name, safe_abs_relocs[i]))
5998 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
5999 }
6000 }
6001
6002 +static void read_phdrs(FILE *fp)
6003 +{
6004 + unsigned int i;
6005 +
6006 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6007 + if (!phdr) {
6008 + die("Unable to allocate %d program headers\n",
6009 + ehdr.e_phnum);
6010 + }
6011 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6012 + die("Seek to %d failed: %s\n",
6013 + ehdr.e_phoff, strerror(errno));
6014 + }
6015 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6016 + die("Cannot read ELF program headers: %s\n",
6017 + strerror(errno));
6018 + }
6019 + for(i = 0; i < ehdr.e_phnum; i++) {
6020 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6021 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6022 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6023 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6024 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6025 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6026 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6027 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6028 + }
6029 +
6030 +}
6031 +
6032 static void read_shdrs(FILE *fp)
6033 {
6034 - int i;
6035 + unsigned int i;
6036 Elf32_Shdr shdr;
6037
6038 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6039 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6040
6041 static void read_strtabs(FILE *fp)
6042 {
6043 - int i;
6044 + unsigned int i;
6045 for (i = 0; i < ehdr.e_shnum; i++) {
6046 struct section *sec = &secs[i];
6047 if (sec->shdr.sh_type != SHT_STRTAB) {
6048 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6049
6050 static void read_symtabs(FILE *fp)
6051 {
6052 - int i,j;
6053 + unsigned int i,j;
6054 for (i = 0; i < ehdr.e_shnum; i++) {
6055 struct section *sec = &secs[i];
6056 if (sec->shdr.sh_type != SHT_SYMTAB) {
6057 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6058
6059 static void read_relocs(FILE *fp)
6060 {
6061 - int i,j;
6062 + unsigned int i,j;
6063 + uint32_t base;
6064 +
6065 for (i = 0; i < ehdr.e_shnum; i++) {
6066 struct section *sec = &secs[i];
6067 if (sec->shdr.sh_type != SHT_REL) {
6068 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6069 die("Cannot read symbol table: %s\n",
6070 strerror(errno));
6071 }
6072 + base = 0;
6073 + for (j = 0; j < ehdr.e_phnum; j++) {
6074 + if (phdr[j].p_type != PT_LOAD )
6075 + continue;
6076 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6077 + continue;
6078 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6079 + break;
6080 + }
6081 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6082 Elf32_Rel *rel = &sec->reltab[j];
6083 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6084 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6085 rel->r_info = elf32_to_cpu(rel->r_info);
6086 }
6087 }
6088 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6089
6090 static void print_absolute_symbols(void)
6091 {
6092 - int i;
6093 + unsigned int i;
6094 printf("Absolute symbols\n");
6095 printf(" Num: Value Size Type Bind Visibility Name\n");
6096 for (i = 0; i < ehdr.e_shnum; i++) {
6097 struct section *sec = &secs[i];
6098 char *sym_strtab;
6099 Elf32_Sym *sh_symtab;
6100 - int j;
6101 + unsigned int j;
6102
6103 if (sec->shdr.sh_type != SHT_SYMTAB) {
6104 continue;
6105 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6106
6107 static void print_absolute_relocs(void)
6108 {
6109 - int i, printed = 0;
6110 + unsigned int i, printed = 0;
6111
6112 for (i = 0; i < ehdr.e_shnum; i++) {
6113 struct section *sec = &secs[i];
6114 struct section *sec_applies, *sec_symtab;
6115 char *sym_strtab;
6116 Elf32_Sym *sh_symtab;
6117 - int j;
6118 + unsigned int j;
6119 if (sec->shdr.sh_type != SHT_REL) {
6120 continue;
6121 }
6122 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6123
6124 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6125 {
6126 - int i;
6127 + unsigned int i;
6128 /* Walk through the relocations */
6129 for (i = 0; i < ehdr.e_shnum; i++) {
6130 char *sym_strtab;
6131 Elf32_Sym *sh_symtab;
6132 struct section *sec_applies, *sec_symtab;
6133 - int j;
6134 + unsigned int j;
6135 struct section *sec = &secs[i];
6136
6137 if (sec->shdr.sh_type != SHT_REL) {
6138 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6139 if (sym->st_shndx == SHN_ABS) {
6140 continue;
6141 }
6142 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6143 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6144 + continue;
6145 +
6146 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6147 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6148 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6149 + continue;
6150 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6151 + continue;
6152 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6153 + continue;
6154 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6155 + continue;
6156 +#endif
6157 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6158 /*
6159 * NONE can be ignored and and PC relative
6160 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6161
6162 static void emit_relocs(int as_text)
6163 {
6164 - int i;
6165 + unsigned int i;
6166 /* Count how many relocations I have and allocate space for them. */
6167 reloc_count = 0;
6168 walk_relocs(count_reloc);
6169 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6170 fname, strerror(errno));
6171 }
6172 read_ehdr(fp);
6173 + read_phdrs(fp);
6174 read_shdrs(fp);
6175 read_strtabs(fp);
6176 read_symtabs(fp);
6177 diff -urNp linux-2.6.32.41/arch/x86/boot/cpucheck.c linux-2.6.32.41/arch/x86/boot/cpucheck.c
6178 --- linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6179 +++ linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6180 @@ -74,7 +74,7 @@ static int has_fpu(void)
6181 u16 fcw = -1, fsw = -1;
6182 u32 cr0;
6183
6184 - asm("movl %%cr0,%0" : "=r" (cr0));
6185 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6186 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6187 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6188 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6189 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6190 {
6191 u32 f0, f1;
6192
6193 - asm("pushfl ; "
6194 + asm volatile("pushfl ; "
6195 "pushfl ; "
6196 "popl %0 ; "
6197 "movl %0,%1 ; "
6198 @@ -115,7 +115,7 @@ static void get_flags(void)
6199 set_bit(X86_FEATURE_FPU, cpu.flags);
6200
6201 if (has_eflag(X86_EFLAGS_ID)) {
6202 - asm("cpuid"
6203 + asm volatile("cpuid"
6204 : "=a" (max_intel_level),
6205 "=b" (cpu_vendor[0]),
6206 "=d" (cpu_vendor[1]),
6207 @@ -124,7 +124,7 @@ static void get_flags(void)
6208
6209 if (max_intel_level >= 0x00000001 &&
6210 max_intel_level <= 0x0000ffff) {
6211 - asm("cpuid"
6212 + asm volatile("cpuid"
6213 : "=a" (tfms),
6214 "=c" (cpu.flags[4]),
6215 "=d" (cpu.flags[0])
6216 @@ -136,7 +136,7 @@ static void get_flags(void)
6217 cpu.model += ((tfms >> 16) & 0xf) << 4;
6218 }
6219
6220 - asm("cpuid"
6221 + asm volatile("cpuid"
6222 : "=a" (max_amd_level)
6223 : "a" (0x80000000)
6224 : "ebx", "ecx", "edx");
6225 @@ -144,7 +144,7 @@ static void get_flags(void)
6226 if (max_amd_level >= 0x80000001 &&
6227 max_amd_level <= 0x8000ffff) {
6228 u32 eax = 0x80000001;
6229 - asm("cpuid"
6230 + asm volatile("cpuid"
6231 : "+a" (eax),
6232 "=c" (cpu.flags[6]),
6233 "=d" (cpu.flags[1])
6234 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6235 u32 ecx = MSR_K7_HWCR;
6236 u32 eax, edx;
6237
6238 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6239 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6240 eax &= ~(1 << 15);
6241 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6242 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6243
6244 get_flags(); /* Make sure it really did something */
6245 err = check_flags();
6246 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6247 u32 ecx = MSR_VIA_FCR;
6248 u32 eax, edx;
6249
6250 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6251 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6252 eax |= (1<<1)|(1<<7);
6253 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6254 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6255
6256 set_bit(X86_FEATURE_CX8, cpu.flags);
6257 err = check_flags();
6258 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6259 u32 eax, edx;
6260 u32 level = 1;
6261
6262 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6263 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6264 - asm("cpuid"
6265 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6266 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6267 + asm volatile("cpuid"
6268 : "+a" (level), "=d" (cpu.flags[0])
6269 : : "ecx", "ebx");
6270 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6271 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6272
6273 err = check_flags();
6274 }
6275 diff -urNp linux-2.6.32.41/arch/x86/boot/header.S linux-2.6.32.41/arch/x86/boot/header.S
6276 --- linux-2.6.32.41/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6277 +++ linux-2.6.32.41/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6278 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6279 # single linked list of
6280 # struct setup_data
6281
6282 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6283 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6284
6285 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6286 #define VO_INIT_SIZE (VO__end - VO__text)
6287 diff -urNp linux-2.6.32.41/arch/x86/boot/memory.c linux-2.6.32.41/arch/x86/boot/memory.c
6288 --- linux-2.6.32.41/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6289 +++ linux-2.6.32.41/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6290 @@ -19,7 +19,7 @@
6291
6292 static int detect_memory_e820(void)
6293 {
6294 - int count = 0;
6295 + unsigned int count = 0;
6296 struct biosregs ireg, oreg;
6297 struct e820entry *desc = boot_params.e820_map;
6298 static struct e820entry buf; /* static so it is zeroed */
6299 diff -urNp linux-2.6.32.41/arch/x86/boot/video.c linux-2.6.32.41/arch/x86/boot/video.c
6300 --- linux-2.6.32.41/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6301 +++ linux-2.6.32.41/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6302 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6303 static unsigned int get_entry(void)
6304 {
6305 char entry_buf[4];
6306 - int i, len = 0;
6307 + unsigned int i, len = 0;
6308 int key;
6309 unsigned int v;
6310
6311 diff -urNp linux-2.6.32.41/arch/x86/boot/video-vesa.c linux-2.6.32.41/arch/x86/boot/video-vesa.c
6312 --- linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6313 +++ linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6314 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6315
6316 boot_params.screen_info.vesapm_seg = oreg.es;
6317 boot_params.screen_info.vesapm_off = oreg.di;
6318 + boot_params.screen_info.vesapm_size = oreg.cx;
6319 }
6320
6321 /*
6322 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_aout.c linux-2.6.32.41/arch/x86/ia32/ia32_aout.c
6323 --- linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6324 +++ linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6325 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6326 unsigned long dump_start, dump_size;
6327 struct user32 dump;
6328
6329 + memset(&dump, 0, sizeof(dump));
6330 +
6331 fs = get_fs();
6332 set_fs(KERNEL_DS);
6333 has_dumped = 1;
6334 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6335 dump_size = dump.u_ssize << PAGE_SHIFT;
6336 DUMP_WRITE(dump_start, dump_size);
6337 }
6338 - /*
6339 - * Finally dump the task struct. Not be used by gdb, but
6340 - * could be useful
6341 - */
6342 - set_fs(KERNEL_DS);
6343 - DUMP_WRITE(current, sizeof(*current));
6344 end_coredump:
6345 set_fs(fs);
6346 return has_dumped;
6347 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32entry.S linux-2.6.32.41/arch/x86/ia32/ia32entry.S
6348 --- linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6349 +++ linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6350 @@ -13,6 +13,7 @@
6351 #include <asm/thread_info.h>
6352 #include <asm/segment.h>
6353 #include <asm/irqflags.h>
6354 +#include <asm/pgtable.h>
6355 #include <linux/linkage.h>
6356
6357 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6358 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6359 ENDPROC(native_irq_enable_sysexit)
6360 #endif
6361
6362 + .macro pax_enter_kernel_user
6363 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6364 + call pax_enter_kernel_user
6365 +#endif
6366 + .endm
6367 +
6368 + .macro pax_exit_kernel_user
6369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6370 + call pax_exit_kernel_user
6371 +#endif
6372 +#ifdef CONFIG_PAX_RANDKSTACK
6373 + pushq %rax
6374 + call pax_randomize_kstack
6375 + popq %rax
6376 +#endif
6377 + pax_erase_kstack
6378 + .endm
6379 +
6380 +.macro pax_erase_kstack
6381 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6382 + call pax_erase_kstack
6383 +#endif
6384 +.endm
6385 +
6386 /*
6387 * 32bit SYSENTER instruction entry.
6388 *
6389 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6390 CFI_REGISTER rsp,rbp
6391 SWAPGS_UNSAFE_STACK
6392 movq PER_CPU_VAR(kernel_stack), %rsp
6393 - addq $(KERNEL_STACK_OFFSET),%rsp
6394 + pax_enter_kernel_user
6395 /*
6396 * No need to follow this irqs on/off section: the syscall
6397 * disabled irqs, here we enable it straight after entry:
6398 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6399 pushfq
6400 CFI_ADJUST_CFA_OFFSET 8
6401 /*CFI_REL_OFFSET rflags,0*/
6402 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6403 + GET_THREAD_INFO(%r10)
6404 + movl TI_sysenter_return(%r10), %r10d
6405 CFI_REGISTER rip,r10
6406 pushq $__USER32_CS
6407 CFI_ADJUST_CFA_OFFSET 8
6408 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6409 SAVE_ARGS 0,0,1
6410 /* no need to do an access_ok check here because rbp has been
6411 32bit zero extended */
6412 +
6413 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6414 + mov $PAX_USER_SHADOW_BASE,%r10
6415 + add %r10,%rbp
6416 +#endif
6417 +
6418 1: movl (%rbp),%ebp
6419 .section __ex_table,"a"
6420 .quad 1b,ia32_badarg
6421 @@ -172,6 +204,7 @@ sysenter_dispatch:
6422 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6423 jnz sysexit_audit
6424 sysexit_from_sys_call:
6425 + pax_exit_kernel_user
6426 andl $~TS_COMPAT,TI_status(%r10)
6427 /* clear IF, that popfq doesn't enable interrupts early */
6428 andl $~0x200,EFLAGS-R11(%rsp)
6429 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6430 movl %eax,%esi /* 2nd arg: syscall number */
6431 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6432 call audit_syscall_entry
6433 +
6434 + pax_erase_kstack
6435 +
6436 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6437 cmpq $(IA32_NR_syscalls-1),%rax
6438 ja ia32_badsys
6439 @@ -252,6 +288,9 @@ sysenter_tracesys:
6440 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6441 movq %rsp,%rdi /* &pt_regs -> arg1 */
6442 call syscall_trace_enter
6443 +
6444 + pax_erase_kstack
6445 +
6446 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6447 RESTORE_REST
6448 cmpq $(IA32_NR_syscalls-1),%rax
6449 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6450 ENTRY(ia32_cstar_target)
6451 CFI_STARTPROC32 simple
6452 CFI_SIGNAL_FRAME
6453 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6454 + CFI_DEF_CFA rsp,0
6455 CFI_REGISTER rip,rcx
6456 /*CFI_REGISTER rflags,r11*/
6457 SWAPGS_UNSAFE_STACK
6458 movl %esp,%r8d
6459 CFI_REGISTER rsp,r8
6460 movq PER_CPU_VAR(kernel_stack),%rsp
6461 +
6462 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6463 + pax_enter_kernel_user
6464 +#endif
6465 +
6466 /*
6467 * No need to follow this irqs on/off section: the syscall
6468 * disabled irqs and here we enable it straight after entry:
6469 */
6470 ENABLE_INTERRUPTS(CLBR_NONE)
6471 - SAVE_ARGS 8,1,1
6472 + SAVE_ARGS 8*6,1,1
6473 movl %eax,%eax /* zero extension */
6474 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6475 movq %rcx,RIP-ARGOFFSET(%rsp)
6476 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6477 /* no need to do an access_ok check here because r8 has been
6478 32bit zero extended */
6479 /* hardware stack frame is complete now */
6480 +
6481 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6482 + mov $PAX_USER_SHADOW_BASE,%r10
6483 + add %r10,%r8
6484 +#endif
6485 +
6486 1: movl (%r8),%r9d
6487 .section __ex_table,"a"
6488 .quad 1b,ia32_badarg
6489 @@ -333,6 +383,7 @@ cstar_dispatch:
6490 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6491 jnz sysretl_audit
6492 sysretl_from_sys_call:
6493 + pax_exit_kernel_user
6494 andl $~TS_COMPAT,TI_status(%r10)
6495 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6496 movl RIP-ARGOFFSET(%rsp),%ecx
6497 @@ -370,6 +421,9 @@ cstar_tracesys:
6498 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6499 movq %rsp,%rdi /* &pt_regs -> arg1 */
6500 call syscall_trace_enter
6501 +
6502 + pax_erase_kstack
6503 +
6504 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6505 RESTORE_REST
6506 xchgl %ebp,%r9d
6507 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6508 CFI_REL_OFFSET rip,RIP-RIP
6509 PARAVIRT_ADJUST_EXCEPTION_FRAME
6510 SWAPGS
6511 + pax_enter_kernel_user
6512 /*
6513 * No need to follow this irqs on/off section: the syscall
6514 * disabled irqs and here we enable it straight after entry:
6515 @@ -448,6 +503,9 @@ ia32_tracesys:
6516 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6517 movq %rsp,%rdi /* &pt_regs -> arg1 */
6518 call syscall_trace_enter
6519 +
6520 + pax_erase_kstack
6521 +
6522 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6523 RESTORE_REST
6524 cmpq $(IA32_NR_syscalls-1),%rax
6525 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_signal.c linux-2.6.32.41/arch/x86/ia32/ia32_signal.c
6526 --- linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6527 +++ linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6528 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6529 sp -= frame_size;
6530 /* Align the stack pointer according to the i386 ABI,
6531 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6532 - sp = ((sp + 4) & -16ul) - 4;
6533 + sp = ((sp - 12) & -16ul) - 4;
6534 return (void __user *) sp;
6535 }
6536
6537 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6538 * These are actually not used anymore, but left because some
6539 * gdb versions depend on them as a marker.
6540 */
6541 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6542 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6543 } put_user_catch(err);
6544
6545 if (err)
6546 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6547 0xb8,
6548 __NR_ia32_rt_sigreturn,
6549 0x80cd,
6550 - 0,
6551 + 0
6552 };
6553
6554 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6555 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6556
6557 if (ka->sa.sa_flags & SA_RESTORER)
6558 restorer = ka->sa.sa_restorer;
6559 + else if (current->mm->context.vdso)
6560 + /* Return stub is in 32bit vsyscall page */
6561 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6562 else
6563 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6564 - rt_sigreturn);
6565 + restorer = &frame->retcode;
6566 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6567
6568 /*
6569 * Not actually used anymore, but left because some gdb
6570 * versions need it.
6571 */
6572 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6573 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6574 } put_user_catch(err);
6575
6576 if (err)
6577 diff -urNp linux-2.6.32.41/arch/x86/include/asm/alternative.h linux-2.6.32.41/arch/x86/include/asm/alternative.h
6578 --- linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6579 +++ linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6580 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6581 " .byte 662b-661b\n" /* sourcelen */ \
6582 " .byte 664f-663f\n" /* replacementlen */ \
6583 ".previous\n" \
6584 - ".section .altinstr_replacement, \"ax\"\n" \
6585 + ".section .altinstr_replacement, \"a\"\n" \
6586 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6587 ".previous"
6588
6589 diff -urNp linux-2.6.32.41/arch/x86/include/asm/apm.h linux-2.6.32.41/arch/x86/include/asm/apm.h
6590 --- linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6591 +++ linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6592 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6593 __asm__ __volatile__(APM_DO_ZERO_SEGS
6594 "pushl %%edi\n\t"
6595 "pushl %%ebp\n\t"
6596 - "lcall *%%cs:apm_bios_entry\n\t"
6597 + "lcall *%%ss:apm_bios_entry\n\t"
6598 "setc %%al\n\t"
6599 "popl %%ebp\n\t"
6600 "popl %%edi\n\t"
6601 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6602 __asm__ __volatile__(APM_DO_ZERO_SEGS
6603 "pushl %%edi\n\t"
6604 "pushl %%ebp\n\t"
6605 - "lcall *%%cs:apm_bios_entry\n\t"
6606 + "lcall *%%ss:apm_bios_entry\n\t"
6607 "setc %%bl\n\t"
6608 "popl %%ebp\n\t"
6609 "popl %%edi\n\t"
6610 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_32.h linux-2.6.32.41/arch/x86/include/asm/atomic_32.h
6611 --- linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6612 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6613 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6614 }
6615
6616 /**
6617 + * atomic_read_unchecked - read atomic variable
6618 + * @v: pointer of type atomic_unchecked_t
6619 + *
6620 + * Atomically reads the value of @v.
6621 + */
6622 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6623 +{
6624 + return v->counter;
6625 +}
6626 +
6627 +/**
6628 * atomic_set - set atomic variable
6629 * @v: pointer of type atomic_t
6630 * @i: required value
6631 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6632 }
6633
6634 /**
6635 + * atomic_set_unchecked - set atomic variable
6636 + * @v: pointer of type atomic_unchecked_t
6637 + * @i: required value
6638 + *
6639 + * Atomically sets the value of @v to @i.
6640 + */
6641 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6642 +{
6643 + v->counter = i;
6644 +}
6645 +
6646 +/**
6647 * atomic_add - add integer to atomic variable
6648 * @i: integer value to add
6649 * @v: pointer of type atomic_t
6650 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6651 */
6652 static inline void atomic_add(int i, atomic_t *v)
6653 {
6654 - asm volatile(LOCK_PREFIX "addl %1,%0"
6655 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6656 +
6657 +#ifdef CONFIG_PAX_REFCOUNT
6658 + "jno 0f\n"
6659 + LOCK_PREFIX "subl %1,%0\n"
6660 + "int $4\n0:\n"
6661 + _ASM_EXTABLE(0b, 0b)
6662 +#endif
6663 +
6664 + : "+m" (v->counter)
6665 + : "ir" (i));
6666 +}
6667 +
6668 +/**
6669 + * atomic_add_unchecked - add integer to atomic variable
6670 + * @i: integer value to add
6671 + * @v: pointer of type atomic_unchecked_t
6672 + *
6673 + * Atomically adds @i to @v.
6674 + */
6675 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6676 +{
6677 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6678 : "+m" (v->counter)
6679 : "ir" (i));
6680 }
6681 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6682 */
6683 static inline void atomic_sub(int i, atomic_t *v)
6684 {
6685 - asm volatile(LOCK_PREFIX "subl %1,%0"
6686 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6687 +
6688 +#ifdef CONFIG_PAX_REFCOUNT
6689 + "jno 0f\n"
6690 + LOCK_PREFIX "addl %1,%0\n"
6691 + "int $4\n0:\n"
6692 + _ASM_EXTABLE(0b, 0b)
6693 +#endif
6694 +
6695 + : "+m" (v->counter)
6696 + : "ir" (i));
6697 +}
6698 +
6699 +/**
6700 + * atomic_sub_unchecked - subtract integer from atomic variable
6701 + * @i: integer value to subtract
6702 + * @v: pointer of type atomic_unchecked_t
6703 + *
6704 + * Atomically subtracts @i from @v.
6705 + */
6706 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6707 +{
6708 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6709 : "+m" (v->counter)
6710 : "ir" (i));
6711 }
6712 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6713 {
6714 unsigned char c;
6715
6716 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6717 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6718 +
6719 +#ifdef CONFIG_PAX_REFCOUNT
6720 + "jno 0f\n"
6721 + LOCK_PREFIX "addl %2,%0\n"
6722 + "int $4\n0:\n"
6723 + _ASM_EXTABLE(0b, 0b)
6724 +#endif
6725 +
6726 + "sete %1\n"
6727 : "+m" (v->counter), "=qm" (c)
6728 : "ir" (i) : "memory");
6729 return c;
6730 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6731 */
6732 static inline void atomic_inc(atomic_t *v)
6733 {
6734 - asm volatile(LOCK_PREFIX "incl %0"
6735 + asm volatile(LOCK_PREFIX "incl %0\n"
6736 +
6737 +#ifdef CONFIG_PAX_REFCOUNT
6738 + "jno 0f\n"
6739 + LOCK_PREFIX "decl %0\n"
6740 + "int $4\n0:\n"
6741 + _ASM_EXTABLE(0b, 0b)
6742 +#endif
6743 +
6744 + : "+m" (v->counter));
6745 +}
6746 +
6747 +/**
6748 + * atomic_inc_unchecked - increment atomic variable
6749 + * @v: pointer of type atomic_unchecked_t
6750 + *
6751 + * Atomically increments @v by 1.
6752 + */
6753 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6754 +{
6755 + asm volatile(LOCK_PREFIX "incl %0\n"
6756 : "+m" (v->counter));
6757 }
6758
6759 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6760 */
6761 static inline void atomic_dec(atomic_t *v)
6762 {
6763 - asm volatile(LOCK_PREFIX "decl %0"
6764 + asm volatile(LOCK_PREFIX "decl %0\n"
6765 +
6766 +#ifdef CONFIG_PAX_REFCOUNT
6767 + "jno 0f\n"
6768 + LOCK_PREFIX "incl %0\n"
6769 + "int $4\n0:\n"
6770 + _ASM_EXTABLE(0b, 0b)
6771 +#endif
6772 +
6773 + : "+m" (v->counter));
6774 +}
6775 +
6776 +/**
6777 + * atomic_dec_unchecked - decrement atomic variable
6778 + * @v: pointer of type atomic_unchecked_t
6779 + *
6780 + * Atomically decrements @v by 1.
6781 + */
6782 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6783 +{
6784 + asm volatile(LOCK_PREFIX "decl %0\n"
6785 : "+m" (v->counter));
6786 }
6787
6788 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6789 {
6790 unsigned char c;
6791
6792 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6793 + asm volatile(LOCK_PREFIX "decl %0\n"
6794 +
6795 +#ifdef CONFIG_PAX_REFCOUNT
6796 + "jno 0f\n"
6797 + LOCK_PREFIX "incl %0\n"
6798 + "int $4\n0:\n"
6799 + _ASM_EXTABLE(0b, 0b)
6800 +#endif
6801 +
6802 + "sete %1\n"
6803 : "+m" (v->counter), "=qm" (c)
6804 : : "memory");
6805 return c != 0;
6806 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6807 {
6808 unsigned char c;
6809
6810 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6811 + asm volatile(LOCK_PREFIX "incl %0\n"
6812 +
6813 +#ifdef CONFIG_PAX_REFCOUNT
6814 + "jno 0f\n"
6815 + LOCK_PREFIX "decl %0\n"
6816 + "into\n0:\n"
6817 + _ASM_EXTABLE(0b, 0b)
6818 +#endif
6819 +
6820 + "sete %1\n"
6821 + : "+m" (v->counter), "=qm" (c)
6822 + : : "memory");
6823 + return c != 0;
6824 +}
6825 +
6826 +/**
6827 + * atomic_inc_and_test_unchecked - increment and test
6828 + * @v: pointer of type atomic_unchecked_t
6829 + *
6830 + * Atomically increments @v by 1
6831 + * and returns true if the result is zero, or false for all
6832 + * other cases.
6833 + */
6834 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6835 +{
6836 + unsigned char c;
6837 +
6838 + asm volatile(LOCK_PREFIX "incl %0\n"
6839 + "sete %1\n"
6840 : "+m" (v->counter), "=qm" (c)
6841 : : "memory");
6842 return c != 0;
6843 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6844 {
6845 unsigned char c;
6846
6847 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6848 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6849 +
6850 +#ifdef CONFIG_PAX_REFCOUNT
6851 + "jno 0f\n"
6852 + LOCK_PREFIX "subl %2,%0\n"
6853 + "int $4\n0:\n"
6854 + _ASM_EXTABLE(0b, 0b)
6855 +#endif
6856 +
6857 + "sets %1\n"
6858 : "+m" (v->counter), "=qm" (c)
6859 : "ir" (i) : "memory");
6860 return c;
6861 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6862 #endif
6863 /* Modern 486+ processor */
6864 __i = i;
6865 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6866 +
6867 +#ifdef CONFIG_PAX_REFCOUNT
6868 + "jno 0f\n"
6869 + "movl %0, %1\n"
6870 + "int $4\n0:\n"
6871 + _ASM_EXTABLE(0b, 0b)
6872 +#endif
6873 +
6874 + : "+r" (i), "+m" (v->counter)
6875 + : : "memory");
6876 + return i + __i;
6877 +
6878 +#ifdef CONFIG_M386
6879 +no_xadd: /* Legacy 386 processor */
6880 + local_irq_save(flags);
6881 + __i = atomic_read(v);
6882 + atomic_set(v, i + __i);
6883 + local_irq_restore(flags);
6884 + return i + __i;
6885 +#endif
6886 +}
6887 +
6888 +/**
6889 + * atomic_add_return_unchecked - add integer and return
6890 + * @v: pointer of type atomic_unchecked_t
6891 + * @i: integer value to add
6892 + *
6893 + * Atomically adds @i to @v and returns @i + @v
6894 + */
6895 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6896 +{
6897 + int __i;
6898 +#ifdef CONFIG_M386
6899 + unsigned long flags;
6900 + if (unlikely(boot_cpu_data.x86 <= 3))
6901 + goto no_xadd;
6902 +#endif
6903 + /* Modern 486+ processor */
6904 + __i = i;
6905 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6906 : "+r" (i), "+m" (v->counter)
6907 : : "memory");
6908 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6909 return cmpxchg(&v->counter, old, new);
6910 }
6911
6912 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6913 +{
6914 + return cmpxchg(&v->counter, old, new);
6915 +}
6916 +
6917 static inline int atomic_xchg(atomic_t *v, int new)
6918 {
6919 return xchg(&v->counter, new);
6920 }
6921
6922 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6923 +{
6924 + return xchg(&v->counter, new);
6925 +}
6926 +
6927 /**
6928 * atomic_add_unless - add unless the number is already a given value
6929 * @v: pointer of type atomic_t
6930 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
6931 */
6932 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6933 {
6934 - int c, old;
6935 + int c, old, new;
6936 c = atomic_read(v);
6937 for (;;) {
6938 - if (unlikely(c == (u)))
6939 + if (unlikely(c == u))
6940 break;
6941 - old = atomic_cmpxchg((v), c, c + (a));
6942 +
6943 + asm volatile("addl %2,%0\n"
6944 +
6945 +#ifdef CONFIG_PAX_REFCOUNT
6946 + "jno 0f\n"
6947 + "subl %2,%0\n"
6948 + "int $4\n0:\n"
6949 + _ASM_EXTABLE(0b, 0b)
6950 +#endif
6951 +
6952 + : "=r" (new)
6953 + : "0" (c), "ir" (a));
6954 +
6955 + old = atomic_cmpxchg(v, c, new);
6956 if (likely(old == c))
6957 break;
6958 c = old;
6959 }
6960 - return c != (u);
6961 + return c != u;
6962 }
6963
6964 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6965
6966 #define atomic_inc_return(v) (atomic_add_return(1, v))
6967 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6968 +{
6969 + return atomic_add_return_unchecked(1, v);
6970 +}
6971 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6972
6973 /* These are x86-specific, used by some header files */
6974 @@ -266,9 +495,18 @@ typedef struct {
6975 u64 __aligned(8) counter;
6976 } atomic64_t;
6977
6978 +#ifdef CONFIG_PAX_REFCOUNT
6979 +typedef struct {
6980 + u64 __aligned(8) counter;
6981 +} atomic64_unchecked_t;
6982 +#else
6983 +typedef atomic64_t atomic64_unchecked_t;
6984 +#endif
6985 +
6986 #define ATOMIC64_INIT(val) { (val) }
6987
6988 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
6989 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
6990
6991 /**
6992 * atomic64_xchg - xchg atomic64 variable
6993 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
6994 * the old value.
6995 */
6996 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
6997 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6998
6999 /**
7000 * atomic64_set - set atomic64 variable
7001 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7002 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7003
7004 /**
7005 + * atomic64_unchecked_set - set atomic64 variable
7006 + * @ptr: pointer to type atomic64_unchecked_t
7007 + * @new_val: value to assign
7008 + *
7009 + * Atomically sets the value of @ptr to @new_val.
7010 + */
7011 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7012 +
7013 +/**
7014 * atomic64_read - read atomic64 variable
7015 * @ptr: pointer to type atomic64_t
7016 *
7017 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7018 return res;
7019 }
7020
7021 -extern u64 atomic64_read(atomic64_t *ptr);
7022 +/**
7023 + * atomic64_read_unchecked - read atomic64 variable
7024 + * @ptr: pointer to type atomic64_unchecked_t
7025 + *
7026 + * Atomically reads the value of @ptr and returns it.
7027 + */
7028 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7029 +{
7030 + u64 res;
7031 +
7032 + /*
7033 + * Note, we inline this atomic64_unchecked_t primitive because
7034 + * it only clobbers EAX/EDX and leaves the others
7035 + * untouched. We also (somewhat subtly) rely on the
7036 + * fact that cmpxchg8b returns the current 64-bit value
7037 + * of the memory location we are touching:
7038 + */
7039 + asm volatile(
7040 + "mov %%ebx, %%eax\n\t"
7041 + "mov %%ecx, %%edx\n\t"
7042 + LOCK_PREFIX "cmpxchg8b %1\n"
7043 + : "=&A" (res)
7044 + : "m" (*ptr)
7045 + );
7046 +
7047 + return res;
7048 +}
7049
7050 /**
7051 * atomic64_add_return - add and return
7052 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7053 * Other variants with different arithmetic operators:
7054 */
7055 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7056 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7057 extern u64 atomic64_inc_return(atomic64_t *ptr);
7058 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7059 extern u64 atomic64_dec_return(atomic64_t *ptr);
7060 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7061
7062 /**
7063 * atomic64_add - add integer to atomic64 variable
7064 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7065 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7066
7067 /**
7068 + * atomic64_add_unchecked - add integer to atomic64 variable
7069 + * @delta: integer value to add
7070 + * @ptr: pointer to type atomic64_unchecked_t
7071 + *
7072 + * Atomically adds @delta to @ptr.
7073 + */
7074 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7075 +
7076 +/**
7077 * atomic64_sub - subtract the atomic64 variable
7078 * @delta: integer value to subtract
7079 * @ptr: pointer to type atomic64_t
7080 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7081 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7082
7083 /**
7084 + * atomic64_sub_unchecked - subtract the atomic64 variable
7085 + * @delta: integer value to subtract
7086 + * @ptr: pointer to type atomic64_unchecked_t
7087 + *
7088 + * Atomically subtracts @delta from @ptr.
7089 + */
7090 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7091 +
7092 +/**
7093 * atomic64_sub_and_test - subtract value from variable and test result
7094 * @delta: integer value to subtract
7095 * @ptr: pointer to type atomic64_t
7096 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7097 extern void atomic64_inc(atomic64_t *ptr);
7098
7099 /**
7100 + * atomic64_inc_unchecked - increment atomic64 variable
7101 + * @ptr: pointer to type atomic64_unchecked_t
7102 + *
7103 + * Atomically increments @ptr by 1.
7104 + */
7105 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7106 +
7107 +/**
7108 * atomic64_dec - decrement atomic64 variable
7109 * @ptr: pointer to type atomic64_t
7110 *
7111 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7112 extern void atomic64_dec(atomic64_t *ptr);
7113
7114 /**
7115 + * atomic64_dec_unchecked - decrement atomic64 variable
7116 + * @ptr: pointer to type atomic64_unchecked_t
7117 + *
7118 + * Atomically decrements @ptr by 1.
7119 + */
7120 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7121 +
7122 +/**
7123 * atomic64_dec_and_test - decrement and test
7124 * @ptr: pointer to type atomic64_t
7125 *
7126 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_64.h linux-2.6.32.41/arch/x86/include/asm/atomic_64.h
7127 --- linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7128 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7129 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7130 }
7131
7132 /**
7133 + * atomic_read_unchecked - read atomic variable
7134 + * @v: pointer of type atomic_unchecked_t
7135 + *
7136 + * Atomically reads the value of @v.
7137 + */
7138 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7139 +{
7140 + return v->counter;
7141 +}
7142 +
7143 +/**
7144 * atomic_set - set atomic variable
7145 * @v: pointer of type atomic_t
7146 * @i: required value
7147 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7148 }
7149
7150 /**
7151 + * atomic_set_unchecked - set atomic variable
7152 + * @v: pointer of type atomic_unchecked_t
7153 + * @i: required value
7154 + *
7155 + * Atomically sets the value of @v to @i.
7156 + */
7157 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7158 +{
7159 + v->counter = i;
7160 +}
7161 +
7162 +/**
7163 * atomic_add - add integer to atomic variable
7164 * @i: integer value to add
7165 * @v: pointer of type atomic_t
7166 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7167 */
7168 static inline void atomic_add(int i, atomic_t *v)
7169 {
7170 - asm volatile(LOCK_PREFIX "addl %1,%0"
7171 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7172 +
7173 +#ifdef CONFIG_PAX_REFCOUNT
7174 + "jno 0f\n"
7175 + LOCK_PREFIX "subl %1,%0\n"
7176 + "int $4\n0:\n"
7177 + _ASM_EXTABLE(0b, 0b)
7178 +#endif
7179 +
7180 + : "=m" (v->counter)
7181 + : "ir" (i), "m" (v->counter));
7182 +}
7183 +
7184 +/**
7185 + * atomic_add_unchecked - add integer to atomic variable
7186 + * @i: integer value to add
7187 + * @v: pointer of type atomic_unchecked_t
7188 + *
7189 + * Atomically adds @i to @v.
7190 + */
7191 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7192 +{
7193 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7194 : "=m" (v->counter)
7195 : "ir" (i), "m" (v->counter));
7196 }
7197 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7198 */
7199 static inline void atomic_sub(int i, atomic_t *v)
7200 {
7201 - asm volatile(LOCK_PREFIX "subl %1,%0"
7202 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7203 +
7204 +#ifdef CONFIG_PAX_REFCOUNT
7205 + "jno 0f\n"
7206 + LOCK_PREFIX "addl %1,%0\n"
7207 + "int $4\n0:\n"
7208 + _ASM_EXTABLE(0b, 0b)
7209 +#endif
7210 +
7211 + : "=m" (v->counter)
7212 + : "ir" (i), "m" (v->counter));
7213 +}
7214 +
7215 +/**
7216 + * atomic_sub_unchecked - subtract the atomic variable
7217 + * @i: integer value to subtract
7218 + * @v: pointer of type atomic_unchecked_t
7219 + *
7220 + * Atomically subtracts @i from @v.
7221 + */
7222 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7223 +{
7224 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7225 : "=m" (v->counter)
7226 : "ir" (i), "m" (v->counter));
7227 }
7228 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7229 {
7230 unsigned char c;
7231
7232 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7233 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7234 +
7235 +#ifdef CONFIG_PAX_REFCOUNT
7236 + "jno 0f\n"
7237 + LOCK_PREFIX "addl %2,%0\n"
7238 + "int $4\n0:\n"
7239 + _ASM_EXTABLE(0b, 0b)
7240 +#endif
7241 +
7242 + "sete %1\n"
7243 : "=m" (v->counter), "=qm" (c)
7244 : "ir" (i), "m" (v->counter) : "memory");
7245 return c;
7246 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7247 */
7248 static inline void atomic_inc(atomic_t *v)
7249 {
7250 - asm volatile(LOCK_PREFIX "incl %0"
7251 + asm volatile(LOCK_PREFIX "incl %0\n"
7252 +
7253 +#ifdef CONFIG_PAX_REFCOUNT
7254 + "jno 0f\n"
7255 + LOCK_PREFIX "decl %0\n"
7256 + "int $4\n0:\n"
7257 + _ASM_EXTABLE(0b, 0b)
7258 +#endif
7259 +
7260 + : "=m" (v->counter)
7261 + : "m" (v->counter));
7262 +}
7263 +
7264 +/**
7265 + * atomic_inc_unchecked - increment atomic variable
7266 + * @v: pointer of type atomic_unchecked_t
7267 + *
7268 + * Atomically increments @v by 1.
7269 + */
7270 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7271 +{
7272 + asm volatile(LOCK_PREFIX "incl %0\n"
7273 : "=m" (v->counter)
7274 : "m" (v->counter));
7275 }
7276 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7277 */
7278 static inline void atomic_dec(atomic_t *v)
7279 {
7280 - asm volatile(LOCK_PREFIX "decl %0"
7281 + asm volatile(LOCK_PREFIX "decl %0\n"
7282 +
7283 +#ifdef CONFIG_PAX_REFCOUNT
7284 + "jno 0f\n"
7285 + LOCK_PREFIX "incl %0\n"
7286 + "int $4\n0:\n"
7287 + _ASM_EXTABLE(0b, 0b)
7288 +#endif
7289 +
7290 + : "=m" (v->counter)
7291 + : "m" (v->counter));
7292 +}
7293 +
7294 +/**
7295 + * atomic_dec_unchecked - decrement atomic variable
7296 + * @v: pointer of type atomic_unchecked_t
7297 + *
7298 + * Atomically decrements @v by 1.
7299 + */
7300 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7301 +{
7302 + asm volatile(LOCK_PREFIX "decl %0\n"
7303 : "=m" (v->counter)
7304 : "m" (v->counter));
7305 }
7306 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7307 {
7308 unsigned char c;
7309
7310 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7311 + asm volatile(LOCK_PREFIX "decl %0\n"
7312 +
7313 +#ifdef CONFIG_PAX_REFCOUNT
7314 + "jno 0f\n"
7315 + LOCK_PREFIX "incl %0\n"
7316 + "int $4\n0:\n"
7317 + _ASM_EXTABLE(0b, 0b)
7318 +#endif
7319 +
7320 + "sete %1\n"
7321 : "=m" (v->counter), "=qm" (c)
7322 : "m" (v->counter) : "memory");
7323 return c != 0;
7324 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7325 {
7326 unsigned char c;
7327
7328 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7329 + asm volatile(LOCK_PREFIX "incl %0\n"
7330 +
7331 +#ifdef CONFIG_PAX_REFCOUNT
7332 + "jno 0f\n"
7333 + LOCK_PREFIX "decl %0\n"
7334 + "int $4\n0:\n"
7335 + _ASM_EXTABLE(0b, 0b)
7336 +#endif
7337 +
7338 + "sete %1\n"
7339 + : "=m" (v->counter), "=qm" (c)
7340 + : "m" (v->counter) : "memory");
7341 + return c != 0;
7342 +}
7343 +
7344 +/**
7345 + * atomic_inc_and_test_unchecked - increment and test
7346 + * @v: pointer of type atomic_unchecked_t
7347 + *
7348 + * Atomically increments @v by 1
7349 + * and returns true if the result is zero, or false for all
7350 + * other cases.
7351 + */
7352 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7353 +{
7354 + unsigned char c;
7355 +
7356 + asm volatile(LOCK_PREFIX "incl %0\n"
7357 + "sete %1\n"
7358 : "=m" (v->counter), "=qm" (c)
7359 : "m" (v->counter) : "memory");
7360 return c != 0;
7361 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7362 {
7363 unsigned char c;
7364
7365 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7366 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7367 +
7368 +#ifdef CONFIG_PAX_REFCOUNT
7369 + "jno 0f\n"
7370 + LOCK_PREFIX "subl %2,%0\n"
7371 + "int $4\n0:\n"
7372 + _ASM_EXTABLE(0b, 0b)
7373 +#endif
7374 +
7375 + "sets %1\n"
7376 : "=m" (v->counter), "=qm" (c)
7377 : "ir" (i), "m" (v->counter) : "memory");
7378 return c;
7379 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7380 static inline int atomic_add_return(int i, atomic_t *v)
7381 {
7382 int __i = i;
7383 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7384 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7385 +
7386 +#ifdef CONFIG_PAX_REFCOUNT
7387 + "jno 0f\n"
7388 + "movl %0, %1\n"
7389 + "int $4\n0:\n"
7390 + _ASM_EXTABLE(0b, 0b)
7391 +#endif
7392 +
7393 + : "+r" (i), "+m" (v->counter)
7394 + : : "memory");
7395 + return i + __i;
7396 +}
7397 +
7398 +/**
7399 + * atomic_add_return_unchecked - add and return
7400 + * @i: integer value to add
7401 + * @v: pointer of type atomic_unchecked_t
7402 + *
7403 + * Atomically adds @i to @v and returns @i + @v
7404 + */
7405 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7406 +{
7407 + int __i = i;
7408 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7409 : "+r" (i), "+m" (v->counter)
7410 : : "memory");
7411 return i + __i;
7412 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7413 }
7414
7415 #define atomic_inc_return(v) (atomic_add_return(1, v))
7416 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7417 +{
7418 + return atomic_add_return_unchecked(1, v);
7419 +}
7420 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7421
7422 /* The 64-bit atomic type */
7423 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7424 }
7425
7426 /**
7427 + * atomic64_read_unchecked - read atomic64 variable
7428 + * @v: pointer of type atomic64_unchecked_t
7429 + *
7430 + * Atomically reads the value of @v.
7431 + * Doesn't imply a read memory barrier.
7432 + */
7433 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7434 +{
7435 + return v->counter;
7436 +}
7437 +
7438 +/**
7439 * atomic64_set - set atomic64 variable
7440 * @v: pointer to type atomic64_t
7441 * @i: required value
7442 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7443 }
7444
7445 /**
7446 + * atomic64_set_unchecked - set atomic64 variable
7447 + * @v: pointer to type atomic64_unchecked_t
7448 + * @i: required value
7449 + *
7450 + * Atomically sets the value of @v to @i.
7451 + */
7452 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7453 +{
7454 + v->counter = i;
7455 +}
7456 +
7457 +/**
7458 * atomic64_add - add integer to atomic64 variable
7459 * @i: integer value to add
7460 * @v: pointer to type atomic64_t
7461 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7462 */
7463 static inline void atomic64_add(long i, atomic64_t *v)
7464 {
7465 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7466 +
7467 +#ifdef CONFIG_PAX_REFCOUNT
7468 + "jno 0f\n"
7469 + LOCK_PREFIX "subq %1,%0\n"
7470 + "int $4\n0:\n"
7471 + _ASM_EXTABLE(0b, 0b)
7472 +#endif
7473 +
7474 + : "=m" (v->counter)
7475 + : "er" (i), "m" (v->counter));
7476 +}
7477 +
7478 +/**
7479 + * atomic64_add_unchecked - add integer to atomic64 variable
7480 + * @i: integer value to add
7481 + * @v: pointer to type atomic64_unchecked_t
7482 + *
7483 + * Atomically adds @i to @v.
7484 + */
7485 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7486 +{
7487 asm volatile(LOCK_PREFIX "addq %1,%0"
7488 : "=m" (v->counter)
7489 : "er" (i), "m" (v->counter));
7490 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7491 */
7492 static inline void atomic64_sub(long i, atomic64_t *v)
7493 {
7494 - asm volatile(LOCK_PREFIX "subq %1,%0"
7495 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7496 +
7497 +#ifdef CONFIG_PAX_REFCOUNT
7498 + "jno 0f\n"
7499 + LOCK_PREFIX "addq %1,%0\n"
7500 + "int $4\n0:\n"
7501 + _ASM_EXTABLE(0b, 0b)
7502 +#endif
7503 +
7504 : "=m" (v->counter)
7505 : "er" (i), "m" (v->counter));
7506 }
7507 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7508 {
7509 unsigned char c;
7510
7511 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7512 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7513 +
7514 +#ifdef CONFIG_PAX_REFCOUNT
7515 + "jno 0f\n"
7516 + LOCK_PREFIX "addq %2,%0\n"
7517 + "int $4\n0:\n"
7518 + _ASM_EXTABLE(0b, 0b)
7519 +#endif
7520 +
7521 + "sete %1\n"
7522 : "=m" (v->counter), "=qm" (c)
7523 : "er" (i), "m" (v->counter) : "memory");
7524 return c;
7525 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7526 */
7527 static inline void atomic64_inc(atomic64_t *v)
7528 {
7529 + asm volatile(LOCK_PREFIX "incq %0\n"
7530 +
7531 +#ifdef CONFIG_PAX_REFCOUNT
7532 + "jno 0f\n"
7533 + LOCK_PREFIX "decq %0\n"
7534 + "int $4\n0:\n"
7535 + _ASM_EXTABLE(0b, 0b)
7536 +#endif
7537 +
7538 + : "=m" (v->counter)
7539 + : "m" (v->counter));
7540 +}
7541 +
7542 +/**
7543 + * atomic64_inc_unchecked - increment atomic64 variable
7544 + * @v: pointer to type atomic64_unchecked_t
7545 + *
7546 + * Atomically increments @v by 1.
7547 + */
7548 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7549 +{
7550 asm volatile(LOCK_PREFIX "incq %0"
7551 : "=m" (v->counter)
7552 : "m" (v->counter));
7553 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7554 */
7555 static inline void atomic64_dec(atomic64_t *v)
7556 {
7557 - asm volatile(LOCK_PREFIX "decq %0"
7558 + asm volatile(LOCK_PREFIX "decq %0\n"
7559 +
7560 +#ifdef CONFIG_PAX_REFCOUNT
7561 + "jno 0f\n"
7562 + LOCK_PREFIX "incq %0\n"
7563 + "int $4\n0:\n"
7564 + _ASM_EXTABLE(0b, 0b)
7565 +#endif
7566 +
7567 + : "=m" (v->counter)
7568 + : "m" (v->counter));
7569 +}
7570 +
7571 +/**
7572 + * atomic64_dec_unchecked - decrement atomic64 variable
7573 + * @v: pointer to type atomic64_t
7574 + *
7575 + * Atomically decrements @v by 1.
7576 + */
7577 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7578 +{
7579 + asm volatile(LOCK_PREFIX "decq %0\n"
7580 : "=m" (v->counter)
7581 : "m" (v->counter));
7582 }
7583 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7584 {
7585 unsigned char c;
7586
7587 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7588 + asm volatile(LOCK_PREFIX "decq %0\n"
7589 +
7590 +#ifdef CONFIG_PAX_REFCOUNT
7591 + "jno 0f\n"
7592 + LOCK_PREFIX "incq %0\n"
7593 + "int $4\n0:\n"
7594 + _ASM_EXTABLE(0b, 0b)
7595 +#endif
7596 +
7597 + "sete %1\n"
7598 : "=m" (v->counter), "=qm" (c)
7599 : "m" (v->counter) : "memory");
7600 return c != 0;
7601 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7602 {
7603 unsigned char c;
7604
7605 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7606 + asm volatile(LOCK_PREFIX "incq %0\n"
7607 +
7608 +#ifdef CONFIG_PAX_REFCOUNT
7609 + "jno 0f\n"
7610 + LOCK_PREFIX "decq %0\n"
7611 + "int $4\n0:\n"
7612 + _ASM_EXTABLE(0b, 0b)
7613 +#endif
7614 +
7615 + "sete %1\n"
7616 : "=m" (v->counter), "=qm" (c)
7617 : "m" (v->counter) : "memory");
7618 return c != 0;
7619 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7620 {
7621 unsigned char c;
7622
7623 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7624 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7625 +
7626 +#ifdef CONFIG_PAX_REFCOUNT
7627 + "jno 0f\n"
7628 + LOCK_PREFIX "subq %2,%0\n"
7629 + "int $4\n0:\n"
7630 + _ASM_EXTABLE(0b, 0b)
7631 +#endif
7632 +
7633 + "sets %1\n"
7634 : "=m" (v->counter), "=qm" (c)
7635 : "er" (i), "m" (v->counter) : "memory");
7636 return c;
7637 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7638 static inline long atomic64_add_return(long i, atomic64_t *v)
7639 {
7640 long __i = i;
7641 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7642 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7643 +
7644 +#ifdef CONFIG_PAX_REFCOUNT
7645 + "jno 0f\n"
7646 + "movq %0, %1\n"
7647 + "int $4\n0:\n"
7648 + _ASM_EXTABLE(0b, 0b)
7649 +#endif
7650 +
7651 + : "+r" (i), "+m" (v->counter)
7652 + : : "memory");
7653 + return i + __i;
7654 +}
7655 +
7656 +/**
7657 + * atomic64_add_return_unchecked - add and return
7658 + * @i: integer value to add
7659 + * @v: pointer to type atomic64_unchecked_t
7660 + *
7661 + * Atomically adds @i to @v and returns @i + @v
7662 + */
7663 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7664 +{
7665 + long __i = i;
7666 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7667 : "+r" (i), "+m" (v->counter)
7668 : : "memory");
7669 return i + __i;
7670 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7671 }
7672
7673 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7674 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7675 +{
7676 + return atomic64_add_return_unchecked(1, v);
7677 +}
7678 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7679
7680 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7681 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7682 return cmpxchg(&v->counter, old, new);
7683 }
7684
7685 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7686 +{
7687 + return cmpxchg(&v->counter, old, new);
7688 +}
7689 +
7690 static inline long atomic64_xchg(atomic64_t *v, long new)
7691 {
7692 return xchg(&v->counter, new);
7693 }
7694
7695 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7696 +{
7697 + return xchg(&v->counter, new);
7698 +}
7699 +
7700 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7701 {
7702 return cmpxchg(&v->counter, old, new);
7703 }
7704
7705 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7706 +{
7707 + return cmpxchg(&v->counter, old, new);
7708 +}
7709 +
7710 static inline long atomic_xchg(atomic_t *v, int new)
7711 {
7712 return xchg(&v->counter, new);
7713 }
7714
7715 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7716 +{
7717 + return xchg(&v->counter, new);
7718 +}
7719 +
7720 /**
7721 * atomic_add_unless - add unless the number is a given value
7722 * @v: pointer of type atomic_t
7723 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7724 */
7725 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7726 {
7727 - int c, old;
7728 + int c, old, new;
7729 c = atomic_read(v);
7730 for (;;) {
7731 - if (unlikely(c == (u)))
7732 + if (unlikely(c == u))
7733 break;
7734 - old = atomic_cmpxchg((v), c, c + (a));
7735 +
7736 + asm volatile("addl %2,%0\n"
7737 +
7738 +#ifdef CONFIG_PAX_REFCOUNT
7739 + "jno 0f\n"
7740 + "subl %2,%0\n"
7741 + "int $4\n0:\n"
7742 + _ASM_EXTABLE(0b, 0b)
7743 +#endif
7744 +
7745 + : "=r" (new)
7746 + : "0" (c), "ir" (a));
7747 +
7748 + old = atomic_cmpxchg(v, c, new);
7749 if (likely(old == c))
7750 break;
7751 c = old;
7752 }
7753 - return c != (u);
7754 + return c != u;
7755 }
7756
7757 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7758 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7759 */
7760 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7761 {
7762 - long c, old;
7763 + long c, old, new;
7764 c = atomic64_read(v);
7765 for (;;) {
7766 - if (unlikely(c == (u)))
7767 + if (unlikely(c == u))
7768 break;
7769 - old = atomic64_cmpxchg((v), c, c + (a));
7770 +
7771 + asm volatile("addq %2,%0\n"
7772 +
7773 +#ifdef CONFIG_PAX_REFCOUNT
7774 + "jno 0f\n"
7775 + "subq %2,%0\n"
7776 + "int $4\n0:\n"
7777 + _ASM_EXTABLE(0b, 0b)
7778 +#endif
7779 +
7780 + : "=r" (new)
7781 + : "0" (c), "er" (a));
7782 +
7783 + old = atomic64_cmpxchg(v, c, new);
7784 if (likely(old == c))
7785 break;
7786 c = old;
7787 }
7788 - return c != (u);
7789 + return c != u;
7790 }
7791
7792 /**
7793 diff -urNp linux-2.6.32.41/arch/x86/include/asm/bitops.h linux-2.6.32.41/arch/x86/include/asm/bitops.h
7794 --- linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7795 +++ linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7796 @@ -38,7 +38,7 @@
7797 * a mask operation on a byte.
7798 */
7799 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7800 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7801 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7802 #define CONST_MASK(nr) (1 << ((nr) & 7))
7803
7804 /**
7805 diff -urNp linux-2.6.32.41/arch/x86/include/asm/boot.h linux-2.6.32.41/arch/x86/include/asm/boot.h
7806 --- linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7807 +++ linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7808 @@ -11,10 +11,15 @@
7809 #include <asm/pgtable_types.h>
7810
7811 /* Physical address where kernel should be loaded. */
7812 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7813 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7814 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7815 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7816
7817 +#ifndef __ASSEMBLY__
7818 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7819 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7820 +#endif
7821 +
7822 /* Minimum kernel alignment, as a power of two */
7823 #ifdef CONFIG_X86_64
7824 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7825 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cacheflush.h linux-2.6.32.41/arch/x86/include/asm/cacheflush.h
7826 --- linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7827 +++ linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7828 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7829 static inline unsigned long get_page_memtype(struct page *pg)
7830 {
7831 if (!PageUncached(pg) && !PageWC(pg))
7832 - return -1;
7833 + return ~0UL;
7834 else if (!PageUncached(pg) && PageWC(pg))
7835 return _PAGE_CACHE_WC;
7836 else if (PageUncached(pg) && !PageWC(pg))
7837 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7838 SetPageWC(pg);
7839 break;
7840 default:
7841 - case -1:
7842 + case ~0UL:
7843 ClearPageUncached(pg);
7844 ClearPageWC(pg);
7845 break;
7846 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cache.h linux-2.6.32.41/arch/x86/include/asm/cache.h
7847 --- linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7848 +++ linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7849 @@ -5,9 +5,10 @@
7850
7851 /* L1 cache line size */
7852 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7853 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7854 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7855
7856 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7857 +#define __read_only __attribute__((__section__(".data.read_only")))
7858
7859 #ifdef CONFIG_X86_VSMP
7860 /* vSMP Internode cacheline shift */
7861 diff -urNp linux-2.6.32.41/arch/x86/include/asm/checksum_32.h linux-2.6.32.41/arch/x86/include/asm/checksum_32.h
7862 --- linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7863 +++ linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7864 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7865 int len, __wsum sum,
7866 int *src_err_ptr, int *dst_err_ptr);
7867
7868 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7869 + int len, __wsum sum,
7870 + int *src_err_ptr, int *dst_err_ptr);
7871 +
7872 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7873 + int len, __wsum sum,
7874 + int *src_err_ptr, int *dst_err_ptr);
7875 +
7876 /*
7877 * Note: when you get a NULL pointer exception here this means someone
7878 * passed in an incorrect kernel address to one of these functions.
7879 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7880 int *err_ptr)
7881 {
7882 might_sleep();
7883 - return csum_partial_copy_generic((__force void *)src, dst,
7884 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7885 len, sum, err_ptr, NULL);
7886 }
7887
7888 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7889 {
7890 might_sleep();
7891 if (access_ok(VERIFY_WRITE, dst, len))
7892 - return csum_partial_copy_generic(src, (__force void *)dst,
7893 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7894 len, sum, NULL, err_ptr);
7895
7896 if (len)
7897 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc_defs.h linux-2.6.32.41/arch/x86/include/asm/desc_defs.h
7898 --- linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7899 +++ linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7900 @@ -31,6 +31,12 @@ struct desc_struct {
7901 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7902 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7903 };
7904 + struct {
7905 + u16 offset_low;
7906 + u16 seg;
7907 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7908 + unsigned offset_high: 16;
7909 + } gate;
7910 };
7911 } __attribute__((packed));
7912
7913 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc.h linux-2.6.32.41/arch/x86/include/asm/desc.h
7914 --- linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7915 +++ linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
7916 @@ -4,6 +4,7 @@
7917 #include <asm/desc_defs.h>
7918 #include <asm/ldt.h>
7919 #include <asm/mmu.h>
7920 +#include <asm/pgtable.h>
7921 #include <linux/smp.h>
7922
7923 static inline void fill_ldt(struct desc_struct *desc,
7924 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
7925 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
7926 desc->type = (info->read_exec_only ^ 1) << 1;
7927 desc->type |= info->contents << 2;
7928 + desc->type |= info->seg_not_present ^ 1;
7929 desc->s = 1;
7930 desc->dpl = 0x3;
7931 desc->p = info->seg_not_present ^ 1;
7932 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
7933 }
7934
7935 extern struct desc_ptr idt_descr;
7936 -extern gate_desc idt_table[];
7937 -
7938 -struct gdt_page {
7939 - struct desc_struct gdt[GDT_ENTRIES];
7940 -} __attribute__((aligned(PAGE_SIZE)));
7941 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7942 +extern gate_desc idt_table[256];
7943
7944 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7945 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7946 {
7947 - return per_cpu(gdt_page, cpu).gdt;
7948 + return cpu_gdt_table[cpu];
7949 }
7950
7951 #ifdef CONFIG_X86_64
7952 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
7953 unsigned long base, unsigned dpl, unsigned flags,
7954 unsigned short seg)
7955 {
7956 - gate->a = (seg << 16) | (base & 0xffff);
7957 - gate->b = (base & 0xffff0000) |
7958 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7959 + gate->gate.offset_low = base;
7960 + gate->gate.seg = seg;
7961 + gate->gate.reserved = 0;
7962 + gate->gate.type = type;
7963 + gate->gate.s = 0;
7964 + gate->gate.dpl = dpl;
7965 + gate->gate.p = 1;
7966 + gate->gate.offset_high = base >> 16;
7967 }
7968
7969 #endif
7970 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7971 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7972 const gate_desc *gate)
7973 {
7974 + pax_open_kernel();
7975 memcpy(&idt[entry], gate, sizeof(*gate));
7976 + pax_close_kernel();
7977 }
7978
7979 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7980 const void *desc)
7981 {
7982 + pax_open_kernel();
7983 memcpy(&ldt[entry], desc, 8);
7984 + pax_close_kernel();
7985 }
7986
7987 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7988 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7989 size = sizeof(struct desc_struct);
7990 break;
7991 }
7992 +
7993 + pax_open_kernel();
7994 memcpy(&gdt[entry], desc, size);
7995 + pax_close_kernel();
7996 }
7997
7998 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7999 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8000
8001 static inline void native_load_tr_desc(void)
8002 {
8003 + pax_open_kernel();
8004 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8005 + pax_close_kernel();
8006 }
8007
8008 static inline void native_load_gdt(const struct desc_ptr *dtr)
8009 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8010 unsigned int i;
8011 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8012
8013 + pax_open_kernel();
8014 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8015 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8016 + pax_close_kernel();
8017 }
8018
8019 #define _LDT_empty(info) \
8020 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8021 desc->limit = (limit >> 16) & 0xf;
8022 }
8023
8024 -static inline void _set_gate(int gate, unsigned type, void *addr,
8025 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8026 unsigned dpl, unsigned ist, unsigned seg)
8027 {
8028 gate_desc s;
8029 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8030 * Pentium F0 0F bugfix can have resulted in the mapped
8031 * IDT being write-protected.
8032 */
8033 -static inline void set_intr_gate(unsigned int n, void *addr)
8034 +static inline void set_intr_gate(unsigned int n, const void *addr)
8035 {
8036 BUG_ON((unsigned)n > 0xFF);
8037 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8038 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8039 /*
8040 * This routine sets up an interrupt gate at directory privilege level 3.
8041 */
8042 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8043 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8044 {
8045 BUG_ON((unsigned)n > 0xFF);
8046 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8047 }
8048
8049 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8050 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8051 {
8052 BUG_ON((unsigned)n > 0xFF);
8053 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8054 }
8055
8056 -static inline void set_trap_gate(unsigned int n, void *addr)
8057 +static inline void set_trap_gate(unsigned int n, const void *addr)
8058 {
8059 BUG_ON((unsigned)n > 0xFF);
8060 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8061 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8062 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8063 {
8064 BUG_ON((unsigned)n > 0xFF);
8065 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8066 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8067 }
8068
8069 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8070 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8071 {
8072 BUG_ON((unsigned)n > 0xFF);
8073 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8074 }
8075
8076 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8077 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8078 {
8079 BUG_ON((unsigned)n > 0xFF);
8080 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8081 }
8082
8083 +#ifdef CONFIG_X86_32
8084 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8085 +{
8086 + struct desc_struct d;
8087 +
8088 + if (likely(limit))
8089 + limit = (limit - 1UL) >> PAGE_SHIFT;
8090 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8091 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8092 +}
8093 +#endif
8094 +
8095 #endif /* _ASM_X86_DESC_H */
8096 diff -urNp linux-2.6.32.41/arch/x86/include/asm/device.h linux-2.6.32.41/arch/x86/include/asm/device.h
8097 --- linux-2.6.32.41/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8098 +++ linux-2.6.32.41/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8099 @@ -6,7 +6,7 @@ struct dev_archdata {
8100 void *acpi_handle;
8101 #endif
8102 #ifdef CONFIG_X86_64
8103 -struct dma_map_ops *dma_ops;
8104 + const struct dma_map_ops *dma_ops;
8105 #endif
8106 #ifdef CONFIG_DMAR
8107 void *iommu; /* hook for IOMMU specific extension */
8108 diff -urNp linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h
8109 --- linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8110 +++ linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8111 @@ -25,9 +25,9 @@ extern int iommu_merge;
8112 extern struct device x86_dma_fallback_dev;
8113 extern int panic_on_overflow;
8114
8115 -extern struct dma_map_ops *dma_ops;
8116 +extern const struct dma_map_ops *dma_ops;
8117
8118 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8119 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8120 {
8121 #ifdef CONFIG_X86_32
8122 return dma_ops;
8123 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8124 /* Make sure we keep the same behaviour */
8125 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8126 {
8127 - struct dma_map_ops *ops = get_dma_ops(dev);
8128 + const struct dma_map_ops *ops = get_dma_ops(dev);
8129 if (ops->mapping_error)
8130 return ops->mapping_error(dev, dma_addr);
8131
8132 @@ -122,7 +122,7 @@ static inline void *
8133 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8134 gfp_t gfp)
8135 {
8136 - struct dma_map_ops *ops = get_dma_ops(dev);
8137 + const struct dma_map_ops *ops = get_dma_ops(dev);
8138 void *memory;
8139
8140 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8141 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8142 static inline void dma_free_coherent(struct device *dev, size_t size,
8143 void *vaddr, dma_addr_t bus)
8144 {
8145 - struct dma_map_ops *ops = get_dma_ops(dev);
8146 + const struct dma_map_ops *ops = get_dma_ops(dev);
8147
8148 WARN_ON(irqs_disabled()); /* for portability */
8149
8150 diff -urNp linux-2.6.32.41/arch/x86/include/asm/e820.h linux-2.6.32.41/arch/x86/include/asm/e820.h
8151 --- linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8152 +++ linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8153 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8154 #define ISA_END_ADDRESS 0x100000
8155 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8156
8157 -#define BIOS_BEGIN 0x000a0000
8158 +#define BIOS_BEGIN 0x000c0000
8159 #define BIOS_END 0x00100000
8160
8161 #ifdef __KERNEL__
8162 diff -urNp linux-2.6.32.41/arch/x86/include/asm/elf.h linux-2.6.32.41/arch/x86/include/asm/elf.h
8163 --- linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8164 +++ linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8165 @@ -257,7 +257,25 @@ extern int force_personality32;
8166 the loader. We need to make sure that it is out of the way of the program
8167 that it will "exec", and that there is sufficient room for the brk. */
8168
8169 +#ifdef CONFIG_PAX_SEGMEXEC
8170 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8171 +#else
8172 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8173 +#endif
8174 +
8175 +#ifdef CONFIG_PAX_ASLR
8176 +#ifdef CONFIG_X86_32
8177 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8178 +
8179 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8180 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8181 +#else
8182 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8183 +
8184 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8185 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8186 +#endif
8187 +#endif
8188
8189 /* This yields a mask that user programs can use to figure out what
8190 instruction set this CPU supports. This could be done in user space,
8191 @@ -311,8 +329,7 @@ do { \
8192 #define ARCH_DLINFO \
8193 do { \
8194 if (vdso_enabled) \
8195 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8196 - (unsigned long)current->mm->context.vdso); \
8197 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8198 } while (0)
8199
8200 #define AT_SYSINFO 32
8201 @@ -323,7 +340,7 @@ do { \
8202
8203 #endif /* !CONFIG_X86_32 */
8204
8205 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8206 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8207
8208 #define VDSO_ENTRY \
8209 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8210 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8211 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8212 #define compat_arch_setup_additional_pages syscall32_setup_pages
8213
8214 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8215 -#define arch_randomize_brk arch_randomize_brk
8216 -
8217 #endif /* _ASM_X86_ELF_H */
8218 diff -urNp linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h
8219 --- linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8220 +++ linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8221 @@ -15,6 +15,6 @@ enum reboot_type {
8222
8223 extern enum reboot_type reboot_type;
8224
8225 -extern void machine_emergency_restart(void);
8226 +extern void machine_emergency_restart(void) __noreturn;
8227
8228 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8229 diff -urNp linux-2.6.32.41/arch/x86/include/asm/futex.h linux-2.6.32.41/arch/x86/include/asm/futex.h
8230 --- linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8231 +++ linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8232 @@ -12,16 +12,18 @@
8233 #include <asm/system.h>
8234
8235 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8236 + typecheck(u32 *, uaddr); \
8237 asm volatile("1:\t" insn "\n" \
8238 "2:\t.section .fixup,\"ax\"\n" \
8239 "3:\tmov\t%3, %1\n" \
8240 "\tjmp\t2b\n" \
8241 "\t.previous\n" \
8242 _ASM_EXTABLE(1b, 3b) \
8243 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8244 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8245 : "i" (-EFAULT), "0" (oparg), "1" (0))
8246
8247 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8248 + typecheck(u32 *, uaddr); \
8249 asm volatile("1:\tmovl %2, %0\n" \
8250 "\tmovl\t%0, %3\n" \
8251 "\t" insn "\n" \
8252 @@ -34,10 +36,10 @@
8253 _ASM_EXTABLE(1b, 4b) \
8254 _ASM_EXTABLE(2b, 4b) \
8255 : "=&a" (oldval), "=&r" (ret), \
8256 - "+m" (*uaddr), "=&r" (tem) \
8257 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8258 : "r" (oparg), "i" (-EFAULT), "1" (0))
8259
8260 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8261 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8262 {
8263 int op = (encoded_op >> 28) & 7;
8264 int cmp = (encoded_op >> 24) & 15;
8265 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8266
8267 switch (op) {
8268 case FUTEX_OP_SET:
8269 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8270 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8271 break;
8272 case FUTEX_OP_ADD:
8273 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8274 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8275 uaddr, oparg);
8276 break;
8277 case FUTEX_OP_OR:
8278 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8279 return ret;
8280 }
8281
8282 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8283 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8284 int newval)
8285 {
8286
8287 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8288 return -ENOSYS;
8289 #endif
8290
8291 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8292 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8293 return -EFAULT;
8294
8295 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8296 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8297 "2:\t.section .fixup, \"ax\"\n"
8298 "3:\tmov %2, %0\n"
8299 "\tjmp 2b\n"
8300 "\t.previous\n"
8301 _ASM_EXTABLE(1b, 3b)
8302 - : "=a" (oldval), "+m" (*uaddr)
8303 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8304 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8305 : "memory"
8306 );
8307 diff -urNp linux-2.6.32.41/arch/x86/include/asm/hw_irq.h linux-2.6.32.41/arch/x86/include/asm/hw_irq.h
8308 --- linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8309 +++ linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8310 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8311 extern void enable_IO_APIC(void);
8312
8313 /* Statistics */
8314 -extern atomic_t irq_err_count;
8315 -extern atomic_t irq_mis_count;
8316 +extern atomic_unchecked_t irq_err_count;
8317 +extern atomic_unchecked_t irq_mis_count;
8318
8319 /* EISA */
8320 extern void eisa_set_level_irq(unsigned int irq);
8321 diff -urNp linux-2.6.32.41/arch/x86/include/asm/i387.h linux-2.6.32.41/arch/x86/include/asm/i387.h
8322 --- linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8323 +++ linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8324 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8325 {
8326 int err;
8327
8328 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8329 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8330 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8331 +#endif
8332 +
8333 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8334 "2:\n"
8335 ".section .fixup,\"ax\"\n"
8336 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8337 {
8338 int err;
8339
8340 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8341 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8342 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8343 +#endif
8344 +
8345 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8346 "2:\n"
8347 ".section .fixup,\"ax\"\n"
8348 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8349 }
8350
8351 /* We need a safe address that is cheap to find and that is already
8352 - in L1 during context switch. The best choices are unfortunately
8353 - different for UP and SMP */
8354 -#ifdef CONFIG_SMP
8355 -#define safe_address (__per_cpu_offset[0])
8356 -#else
8357 -#define safe_address (kstat_cpu(0).cpustat.user)
8358 -#endif
8359 + in L1 during context switch. */
8360 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8361
8362 /*
8363 * These must be called with preempt disabled
8364 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8365 struct thread_info *me = current_thread_info();
8366 preempt_disable();
8367 if (me->status & TS_USEDFPU)
8368 - __save_init_fpu(me->task);
8369 + __save_init_fpu(current);
8370 else
8371 clts();
8372 }
8373 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_32.h linux-2.6.32.41/arch/x86/include/asm/io_32.h
8374 --- linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8375 +++ linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8376 @@ -3,6 +3,7 @@
8377
8378 #include <linux/string.h>
8379 #include <linux/compiler.h>
8380 +#include <asm/processor.h>
8381
8382 /*
8383 * This file contains the definitions for the x86 IO instructions
8384 @@ -42,6 +43,17 @@
8385
8386 #ifdef __KERNEL__
8387
8388 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8389 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8390 +{
8391 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8392 +}
8393 +
8394 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8395 +{
8396 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8397 +}
8398 +
8399 #include <asm-generic/iomap.h>
8400
8401 #include <linux/vmalloc.h>
8402 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_64.h linux-2.6.32.41/arch/x86/include/asm/io_64.h
8403 --- linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8404 +++ linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8405 @@ -140,6 +140,17 @@ __OUTS(l)
8406
8407 #include <linux/vmalloc.h>
8408
8409 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8410 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8411 +{
8412 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8413 +}
8414 +
8415 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8416 +{
8417 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8418 +}
8419 +
8420 #include <asm-generic/iomap.h>
8421
8422 void __memcpy_fromio(void *, unsigned long, unsigned);
8423 diff -urNp linux-2.6.32.41/arch/x86/include/asm/iommu.h linux-2.6.32.41/arch/x86/include/asm/iommu.h
8424 --- linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8425 +++ linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8426 @@ -3,7 +3,7 @@
8427
8428 extern void pci_iommu_shutdown(void);
8429 extern void no_iommu_init(void);
8430 -extern struct dma_map_ops nommu_dma_ops;
8431 +extern const struct dma_map_ops nommu_dma_ops;
8432 extern int force_iommu, no_iommu;
8433 extern int iommu_detected;
8434 extern int iommu_pass_through;
8435 diff -urNp linux-2.6.32.41/arch/x86/include/asm/irqflags.h linux-2.6.32.41/arch/x86/include/asm/irqflags.h
8436 --- linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8437 +++ linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8438 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8439 sti; \
8440 sysexit
8441
8442 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8443 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8444 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8445 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8446 +
8447 #else
8448 #define INTERRUPT_RETURN iret
8449 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8450 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kprobes.h linux-2.6.32.41/arch/x86/include/asm/kprobes.h
8451 --- linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8452 +++ linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8453 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8454 #define BREAKPOINT_INSTRUCTION 0xcc
8455 #define RELATIVEJUMP_INSTRUCTION 0xe9
8456 #define MAX_INSN_SIZE 16
8457 -#define MAX_STACK_SIZE 64
8458 -#define MIN_STACK_SIZE(ADDR) \
8459 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8460 - THREAD_SIZE - (unsigned long)(ADDR))) \
8461 - ? (MAX_STACK_SIZE) \
8462 - : (((unsigned long)current_thread_info()) + \
8463 - THREAD_SIZE - (unsigned long)(ADDR)))
8464 +#define MAX_STACK_SIZE 64UL
8465 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8466
8467 #define flush_insn_slot(p) do { } while (0)
8468
8469 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kvm_host.h linux-2.6.32.41/arch/x86/include/asm/kvm_host.h
8470 --- linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8471 +++ linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8472 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8473 const struct trace_print_flags *exit_reasons_str;
8474 };
8475
8476 -extern struct kvm_x86_ops *kvm_x86_ops;
8477 +extern const struct kvm_x86_ops *kvm_x86_ops;
8478
8479 int kvm_mmu_module_init(void);
8480 void kvm_mmu_module_exit(void);
8481 diff -urNp linux-2.6.32.41/arch/x86/include/asm/local.h linux-2.6.32.41/arch/x86/include/asm/local.h
8482 --- linux-2.6.32.41/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8483 +++ linux-2.6.32.41/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8484 @@ -18,26 +18,58 @@ typedef struct {
8485
8486 static inline void local_inc(local_t *l)
8487 {
8488 - asm volatile(_ASM_INC "%0"
8489 + asm volatile(_ASM_INC "%0\n"
8490 +
8491 +#ifdef CONFIG_PAX_REFCOUNT
8492 + "jno 0f\n"
8493 + _ASM_DEC "%0\n"
8494 + "int $4\n0:\n"
8495 + _ASM_EXTABLE(0b, 0b)
8496 +#endif
8497 +
8498 : "+m" (l->a.counter));
8499 }
8500
8501 static inline void local_dec(local_t *l)
8502 {
8503 - asm volatile(_ASM_DEC "%0"
8504 + asm volatile(_ASM_DEC "%0\n"
8505 +
8506 +#ifdef CONFIG_PAX_REFCOUNT
8507 + "jno 0f\n"
8508 + _ASM_INC "%0\n"
8509 + "int $4\n0:\n"
8510 + _ASM_EXTABLE(0b, 0b)
8511 +#endif
8512 +
8513 : "+m" (l->a.counter));
8514 }
8515
8516 static inline void local_add(long i, local_t *l)
8517 {
8518 - asm volatile(_ASM_ADD "%1,%0"
8519 + asm volatile(_ASM_ADD "%1,%0\n"
8520 +
8521 +#ifdef CONFIG_PAX_REFCOUNT
8522 + "jno 0f\n"
8523 + _ASM_SUB "%1,%0\n"
8524 + "int $4\n0:\n"
8525 + _ASM_EXTABLE(0b, 0b)
8526 +#endif
8527 +
8528 : "+m" (l->a.counter)
8529 : "ir" (i));
8530 }
8531
8532 static inline void local_sub(long i, local_t *l)
8533 {
8534 - asm volatile(_ASM_SUB "%1,%0"
8535 + asm volatile(_ASM_SUB "%1,%0\n"
8536 +
8537 +#ifdef CONFIG_PAX_REFCOUNT
8538 + "jno 0f\n"
8539 + _ASM_ADD "%1,%0\n"
8540 + "int $4\n0:\n"
8541 + _ASM_EXTABLE(0b, 0b)
8542 +#endif
8543 +
8544 : "+m" (l->a.counter)
8545 : "ir" (i));
8546 }
8547 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8548 {
8549 unsigned char c;
8550
8551 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8552 + asm volatile(_ASM_SUB "%2,%0\n"
8553 +
8554 +#ifdef CONFIG_PAX_REFCOUNT
8555 + "jno 0f\n"
8556 + _ASM_ADD "%2,%0\n"
8557 + "int $4\n0:\n"
8558 + _ASM_EXTABLE(0b, 0b)
8559 +#endif
8560 +
8561 + "sete %1\n"
8562 : "+m" (l->a.counter), "=qm" (c)
8563 : "ir" (i) : "memory");
8564 return c;
8565 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8566 {
8567 unsigned char c;
8568
8569 - asm volatile(_ASM_DEC "%0; sete %1"
8570 + asm volatile(_ASM_DEC "%0\n"
8571 +
8572 +#ifdef CONFIG_PAX_REFCOUNT
8573 + "jno 0f\n"
8574 + _ASM_INC "%0\n"
8575 + "int $4\n0:\n"
8576 + _ASM_EXTABLE(0b, 0b)
8577 +#endif
8578 +
8579 + "sete %1\n"
8580 : "+m" (l->a.counter), "=qm" (c)
8581 : : "memory");
8582 return c != 0;
8583 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8584 {
8585 unsigned char c;
8586
8587 - asm volatile(_ASM_INC "%0; sete %1"
8588 + asm volatile(_ASM_INC "%0\n"
8589 +
8590 +#ifdef CONFIG_PAX_REFCOUNT
8591 + "jno 0f\n"
8592 + _ASM_DEC "%0\n"
8593 + "int $4\n0:\n"
8594 + _ASM_EXTABLE(0b, 0b)
8595 +#endif
8596 +
8597 + "sete %1\n"
8598 : "+m" (l->a.counter), "=qm" (c)
8599 : : "memory");
8600 return c != 0;
8601 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8602 {
8603 unsigned char c;
8604
8605 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8606 + asm volatile(_ASM_ADD "%2,%0\n"
8607 +
8608 +#ifdef CONFIG_PAX_REFCOUNT
8609 + "jno 0f\n"
8610 + _ASM_SUB "%2,%0\n"
8611 + "int $4\n0:\n"
8612 + _ASM_EXTABLE(0b, 0b)
8613 +#endif
8614 +
8615 + "sets %1\n"
8616 : "+m" (l->a.counter), "=qm" (c)
8617 : "ir" (i) : "memory");
8618 return c;
8619 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8620 #endif
8621 /* Modern 486+ processor */
8622 __i = i;
8623 - asm volatile(_ASM_XADD "%0, %1;"
8624 + asm volatile(_ASM_XADD "%0, %1\n"
8625 +
8626 +#ifdef CONFIG_PAX_REFCOUNT
8627 + "jno 0f\n"
8628 + _ASM_MOV "%0,%1\n"
8629 + "int $4\n0:\n"
8630 + _ASM_EXTABLE(0b, 0b)
8631 +#endif
8632 +
8633 : "+r" (i), "+m" (l->a.counter)
8634 : : "memory");
8635 return i + __i;
8636 diff -urNp linux-2.6.32.41/arch/x86/include/asm/microcode.h linux-2.6.32.41/arch/x86/include/asm/microcode.h
8637 --- linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8638 +++ linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8639 @@ -12,13 +12,13 @@ struct device;
8640 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8641
8642 struct microcode_ops {
8643 - enum ucode_state (*request_microcode_user) (int cpu,
8644 + enum ucode_state (* const request_microcode_user) (int cpu,
8645 const void __user *buf, size_t size);
8646
8647 - enum ucode_state (*request_microcode_fw) (int cpu,
8648 + enum ucode_state (* const request_microcode_fw) (int cpu,
8649 struct device *device);
8650
8651 - void (*microcode_fini_cpu) (int cpu);
8652 + void (* const microcode_fini_cpu) (int cpu);
8653
8654 /*
8655 * The generic 'microcode_core' part guarantees that
8656 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8657 extern struct ucode_cpu_info ucode_cpu_info[];
8658
8659 #ifdef CONFIG_MICROCODE_INTEL
8660 -extern struct microcode_ops * __init init_intel_microcode(void);
8661 +extern const struct microcode_ops * __init init_intel_microcode(void);
8662 #else
8663 -static inline struct microcode_ops * __init init_intel_microcode(void)
8664 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8665 {
8666 return NULL;
8667 }
8668 #endif /* CONFIG_MICROCODE_INTEL */
8669
8670 #ifdef CONFIG_MICROCODE_AMD
8671 -extern struct microcode_ops * __init init_amd_microcode(void);
8672 +extern const struct microcode_ops * __init init_amd_microcode(void);
8673 #else
8674 -static inline struct microcode_ops * __init init_amd_microcode(void)
8675 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8676 {
8677 return NULL;
8678 }
8679 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mman.h linux-2.6.32.41/arch/x86/include/asm/mman.h
8680 --- linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8681 +++ linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8682 @@ -5,4 +5,14 @@
8683
8684 #include <asm-generic/mman.h>
8685
8686 +#ifdef __KERNEL__
8687 +#ifndef __ASSEMBLY__
8688 +#ifdef CONFIG_X86_32
8689 +#define arch_mmap_check i386_mmap_check
8690 +int i386_mmap_check(unsigned long addr, unsigned long len,
8691 + unsigned long flags);
8692 +#endif
8693 +#endif
8694 +#endif
8695 +
8696 #endif /* _ASM_X86_MMAN_H */
8697 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu_context.h linux-2.6.32.41/arch/x86/include/asm/mmu_context.h
8698 --- linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8699 +++ linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8700 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8701
8702 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8703 {
8704 +
8705 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8706 + unsigned int i;
8707 + pgd_t *pgd;
8708 +
8709 + pax_open_kernel();
8710 + pgd = get_cpu_pgd(smp_processor_id());
8711 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8712 + if (paravirt_enabled())
8713 + set_pgd(pgd+i, native_make_pgd(0));
8714 + else
8715 + pgd[i] = native_make_pgd(0);
8716 + pax_close_kernel();
8717 +#endif
8718 +
8719 #ifdef CONFIG_SMP
8720 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8721 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8722 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8723 struct task_struct *tsk)
8724 {
8725 unsigned cpu = smp_processor_id();
8726 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8727 + int tlbstate = TLBSTATE_OK;
8728 +#endif
8729
8730 if (likely(prev != next)) {
8731 #ifdef CONFIG_SMP
8732 +#ifdef CONFIG_X86_32
8733 + tlbstate = percpu_read(cpu_tlbstate.state);
8734 +#endif
8735 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8736 percpu_write(cpu_tlbstate.active_mm, next);
8737 #endif
8738 cpumask_set_cpu(cpu, mm_cpumask(next));
8739
8740 /* Re-load page tables */
8741 +#ifdef CONFIG_PAX_PER_CPU_PGD
8742 + pax_open_kernel();
8743 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8744 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8745 + pax_close_kernel();
8746 + load_cr3(get_cpu_pgd(cpu));
8747 +#else
8748 load_cr3(next->pgd);
8749 +#endif
8750
8751 /* stop flush ipis for the previous mm */
8752 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8753 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8754 */
8755 if (unlikely(prev->context.ldt != next->context.ldt))
8756 load_LDT_nolock(&next->context);
8757 - }
8758 +
8759 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8760 + if (!nx_enabled) {
8761 + smp_mb__before_clear_bit();
8762 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8763 + smp_mb__after_clear_bit();
8764 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8765 + }
8766 +#endif
8767 +
8768 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8769 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8770 + prev->context.user_cs_limit != next->context.user_cs_limit))
8771 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8772 #ifdef CONFIG_SMP
8773 + else if (unlikely(tlbstate != TLBSTATE_OK))
8774 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8775 +#endif
8776 +#endif
8777 +
8778 + }
8779 else {
8780 +
8781 +#ifdef CONFIG_PAX_PER_CPU_PGD
8782 + pax_open_kernel();
8783 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8784 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8785 + pax_close_kernel();
8786 + load_cr3(get_cpu_pgd(cpu));
8787 +#endif
8788 +
8789 +#ifdef CONFIG_SMP
8790 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8791 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8792
8793 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8794 * tlb flush IPI delivery. We must reload CR3
8795 * to make sure to use no freed page tables.
8796 */
8797 +
8798 +#ifndef CONFIG_PAX_PER_CPU_PGD
8799 load_cr3(next->pgd);
8800 +#endif
8801 +
8802 load_LDT_nolock(&next->context);
8803 +
8804 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8805 + if (!nx_enabled)
8806 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8807 +#endif
8808 +
8809 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8810 +#ifdef CONFIG_PAX_PAGEEXEC
8811 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8812 +#endif
8813 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8814 +#endif
8815 +
8816 }
8817 - }
8818 #endif
8819 + }
8820 }
8821
8822 #define activate_mm(prev, next) \
8823 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu.h linux-2.6.32.41/arch/x86/include/asm/mmu.h
8824 --- linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8825 +++ linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8826 @@ -9,10 +9,23 @@
8827 * we put the segment information here.
8828 */
8829 typedef struct {
8830 - void *ldt;
8831 + struct desc_struct *ldt;
8832 int size;
8833 struct mutex lock;
8834 - void *vdso;
8835 + unsigned long vdso;
8836 +
8837 +#ifdef CONFIG_X86_32
8838 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8839 + unsigned long user_cs_base;
8840 + unsigned long user_cs_limit;
8841 +
8842 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8843 + cpumask_t cpu_user_cs_mask;
8844 +#endif
8845 +
8846 +#endif
8847 +#endif
8848 +
8849 } mm_context_t;
8850
8851 #ifdef CONFIG_SMP
8852 diff -urNp linux-2.6.32.41/arch/x86/include/asm/module.h linux-2.6.32.41/arch/x86/include/asm/module.h
8853 --- linux-2.6.32.41/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8854 +++ linux-2.6.32.41/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8855 @@ -5,6 +5,7 @@
8856
8857 #ifdef CONFIG_X86_64
8858 /* X86_64 does not define MODULE_PROC_FAMILY */
8859 +#define MODULE_PROC_FAMILY ""
8860 #elif defined CONFIG_M386
8861 #define MODULE_PROC_FAMILY "386 "
8862 #elif defined CONFIG_M486
8863 @@ -59,13 +60,36 @@
8864 #error unknown processor family
8865 #endif
8866
8867 -#ifdef CONFIG_X86_32
8868 -# ifdef CONFIG_4KSTACKS
8869 -# define MODULE_STACKSIZE "4KSTACKS "
8870 -# else
8871 -# define MODULE_STACKSIZE ""
8872 -# endif
8873 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8874 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8875 +#define MODULE_PAX_UDEREF "UDEREF "
8876 +#else
8877 +#define MODULE_PAX_UDEREF ""
8878 +#endif
8879 +
8880 +#ifdef CONFIG_PAX_KERNEXEC
8881 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
8882 +#else
8883 +#define MODULE_PAX_KERNEXEC ""
8884 +#endif
8885 +
8886 +#ifdef CONFIG_PAX_REFCOUNT
8887 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
8888 +#else
8889 +#define MODULE_PAX_REFCOUNT ""
8890 #endif
8891
8892 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8893 +#define MODULE_STACKSIZE "4KSTACKS "
8894 +#else
8895 +#define MODULE_STACKSIZE ""
8896 +#endif
8897 +
8898 +#ifdef CONFIG_GRKERNSEC
8899 +#define MODULE_GRSEC "GRSECURITY "
8900 +#else
8901 +#define MODULE_GRSEC ""
8902 +#endif
8903 +
8904 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8905 +
8906 #endif /* _ASM_X86_MODULE_H */
8907 diff -urNp linux-2.6.32.41/arch/x86/include/asm/page_64_types.h linux-2.6.32.41/arch/x86/include/asm/page_64_types.h
8908 --- linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8909 +++ linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8910 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8911
8912 /* duplicated to the one in bootmem.h */
8913 extern unsigned long max_pfn;
8914 -extern unsigned long phys_base;
8915 +extern const unsigned long phys_base;
8916
8917 extern unsigned long __phys_addr(unsigned long);
8918 #define __phys_reloc_hide(x) (x)
8919 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt.h linux-2.6.32.41/arch/x86/include/asm/paravirt.h
8920 --- linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
8921 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
8922 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
8923 pv_mmu_ops.set_fixmap(idx, phys, flags);
8924 }
8925
8926 +#ifdef CONFIG_PAX_KERNEXEC
8927 +static inline unsigned long pax_open_kernel(void)
8928 +{
8929 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8930 +}
8931 +
8932 +static inline unsigned long pax_close_kernel(void)
8933 +{
8934 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8935 +}
8936 +#else
8937 +static inline unsigned long pax_open_kernel(void) { return 0; }
8938 +static inline unsigned long pax_close_kernel(void) { return 0; }
8939 +#endif
8940 +
8941 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8942
8943 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
8944 @@ -945,7 +960,7 @@ extern void default_banner(void);
8945
8946 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8947 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8948 -#define PARA_INDIRECT(addr) *%cs:addr
8949 +#define PARA_INDIRECT(addr) *%ss:addr
8950 #endif
8951
8952 #define INTERRUPT_RETURN \
8953 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
8954 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8955 CLBR_NONE, \
8956 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8957 +
8958 +#define GET_CR0_INTO_RDI \
8959 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8960 + mov %rax,%rdi
8961 +
8962 +#define SET_RDI_INTO_CR0 \
8963 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8964 +
8965 +#define GET_CR3_INTO_RDI \
8966 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8967 + mov %rax,%rdi
8968 +
8969 +#define SET_RDI_INTO_CR3 \
8970 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8971 +
8972 #endif /* CONFIG_X86_32 */
8973
8974 #endif /* __ASSEMBLY__ */
8975 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h
8976 --- linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
8977 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
8978 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
8979 an mfn. We can tell which is which from the index. */
8980 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8981 phys_addr_t phys, pgprot_t flags);
8982 +
8983 +#ifdef CONFIG_PAX_KERNEXEC
8984 + unsigned long (*pax_open_kernel)(void);
8985 + unsigned long (*pax_close_kernel)(void);
8986 +#endif
8987 +
8988 };
8989
8990 struct raw_spinlock;
8991 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pci_x86.h linux-2.6.32.41/arch/x86/include/asm/pci_x86.h
8992 --- linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
8993 +++ linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
8994 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
8995 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
8996
8997 struct pci_raw_ops {
8998 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8999 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9000 int reg, int len, u32 *val);
9001 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9002 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9003 int reg, int len, u32 val);
9004 };
9005
9006 -extern struct pci_raw_ops *raw_pci_ops;
9007 -extern struct pci_raw_ops *raw_pci_ext_ops;
9008 +extern const struct pci_raw_ops *raw_pci_ops;
9009 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9010
9011 -extern struct pci_raw_ops pci_direct_conf1;
9012 +extern const struct pci_raw_ops pci_direct_conf1;
9013 extern bool port_cf9_safe;
9014
9015 /* arch_initcall level */
9016 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgalloc.h linux-2.6.32.41/arch/x86/include/asm/pgalloc.h
9017 --- linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9018 +++ linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9019 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9020 pmd_t *pmd, pte_t *pte)
9021 {
9022 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9023 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9024 +}
9025 +
9026 +static inline void pmd_populate_user(struct mm_struct *mm,
9027 + pmd_t *pmd, pte_t *pte)
9028 +{
9029 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9030 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9031 }
9032
9033 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h
9034 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9035 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9036 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9037
9038 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9039 {
9040 + pax_open_kernel();
9041 *pmdp = pmd;
9042 + pax_close_kernel();
9043 }
9044
9045 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9046 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h
9047 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9048 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9049 @@ -26,9 +26,6 @@
9050 struct mm_struct;
9051 struct vm_area_struct;
9052
9053 -extern pgd_t swapper_pg_dir[1024];
9054 -extern pgd_t trampoline_pg_dir[1024];
9055 -
9056 static inline void pgtable_cache_init(void) { }
9057 static inline void check_pgt_cache(void) { }
9058 void paging_init(void);
9059 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9060 # include <asm/pgtable-2level.h>
9061 #endif
9062
9063 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9064 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9065 +#ifdef CONFIG_X86_PAE
9066 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9067 +#endif
9068 +
9069 #if defined(CONFIG_HIGHPTE)
9070 #define __KM_PTE \
9071 (in_nmi() ? KM_NMI_PTE : \
9072 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9073 /* Clear a kernel PTE and flush it from the TLB */
9074 #define kpte_clear_flush(ptep, vaddr) \
9075 do { \
9076 + pax_open_kernel(); \
9077 pte_clear(&init_mm, (vaddr), (ptep)); \
9078 + pax_close_kernel(); \
9079 __flush_tlb_one((vaddr)); \
9080 } while (0)
9081
9082 @@ -85,6 +90,9 @@ do { \
9083
9084 #endif /* !__ASSEMBLY__ */
9085
9086 +#define HAVE_ARCH_UNMAPPED_AREA
9087 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9088 +
9089 /*
9090 * kern_addr_valid() is (1) for FLATMEM and (0) for
9091 * SPARSEMEM and DISCONTIGMEM
9092 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h
9093 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9094 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9095 @@ -8,7 +8,7 @@
9096 */
9097 #ifdef CONFIG_X86_PAE
9098 # include <asm/pgtable-3level_types.h>
9099 -# define PMD_SIZE (1UL << PMD_SHIFT)
9100 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9101 # define PMD_MASK (~(PMD_SIZE - 1))
9102 #else
9103 # include <asm/pgtable-2level_types.h>
9104 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9105 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9106 #endif
9107
9108 +#ifdef CONFIG_PAX_KERNEXEC
9109 +#ifndef __ASSEMBLY__
9110 +extern unsigned char MODULES_EXEC_VADDR[];
9111 +extern unsigned char MODULES_EXEC_END[];
9112 +#endif
9113 +#include <asm/boot.h>
9114 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9115 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9116 +#else
9117 +#define ktla_ktva(addr) (addr)
9118 +#define ktva_ktla(addr) (addr)
9119 +#endif
9120 +
9121 #define MODULES_VADDR VMALLOC_START
9122 #define MODULES_END VMALLOC_END
9123 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9124 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h
9125 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9126 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9127 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9128
9129 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9130 {
9131 + pax_open_kernel();
9132 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9133 + pax_close_kernel();
9134 }
9135
9136 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9137 {
9138 + pax_open_kernel();
9139 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9140 + pax_close_kernel();
9141 }
9142
9143 /*
9144 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h
9145 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9146 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9147 @@ -16,10 +16,13 @@
9148
9149 extern pud_t level3_kernel_pgt[512];
9150 extern pud_t level3_ident_pgt[512];
9151 +extern pud_t level3_vmalloc_pgt[512];
9152 +extern pud_t level3_vmemmap_pgt[512];
9153 +extern pud_t level2_vmemmap_pgt[512];
9154 extern pmd_t level2_kernel_pgt[512];
9155 extern pmd_t level2_fixmap_pgt[512];
9156 -extern pmd_t level2_ident_pgt[512];
9157 -extern pgd_t init_level4_pgt[];
9158 +extern pmd_t level2_ident_pgt[512*2];
9159 +extern pgd_t init_level4_pgt[512];
9160
9161 #define swapper_pg_dir init_level4_pgt
9162
9163 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9164
9165 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9166 {
9167 + pax_open_kernel();
9168 *pmdp = pmd;
9169 + pax_close_kernel();
9170 }
9171
9172 static inline void native_pmd_clear(pmd_t *pmd)
9173 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9174
9175 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9176 {
9177 + pax_open_kernel();
9178 *pgdp = pgd;
9179 + pax_close_kernel();
9180 }
9181
9182 static inline void native_pgd_clear(pgd_t *pgd)
9183 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h
9184 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9185 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9186 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9187 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9188 #define MODULES_END _AC(0xffffffffff000000, UL)
9189 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9190 +#define MODULES_EXEC_VADDR MODULES_VADDR
9191 +#define MODULES_EXEC_END MODULES_END
9192 +
9193 +#define ktla_ktva(addr) (addr)
9194 +#define ktva_ktla(addr) (addr)
9195
9196 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9197 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable.h linux-2.6.32.41/arch/x86/include/asm/pgtable.h
9198 --- linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9199 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9200 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9201
9202 #define arch_end_context_switch(prev) do {} while(0)
9203
9204 +#define pax_open_kernel() native_pax_open_kernel()
9205 +#define pax_close_kernel() native_pax_close_kernel()
9206 #endif /* CONFIG_PARAVIRT */
9207
9208 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9209 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9210 +
9211 +#ifdef CONFIG_PAX_KERNEXEC
9212 +static inline unsigned long native_pax_open_kernel(void)
9213 +{
9214 + unsigned long cr0;
9215 +
9216 + preempt_disable();
9217 + barrier();
9218 + cr0 = read_cr0() ^ X86_CR0_WP;
9219 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9220 + write_cr0(cr0);
9221 + return cr0 ^ X86_CR0_WP;
9222 +}
9223 +
9224 +static inline unsigned long native_pax_close_kernel(void)
9225 +{
9226 + unsigned long cr0;
9227 +
9228 + cr0 = read_cr0() ^ X86_CR0_WP;
9229 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9230 + write_cr0(cr0);
9231 + barrier();
9232 + preempt_enable_no_resched();
9233 + return cr0 ^ X86_CR0_WP;
9234 +}
9235 +#else
9236 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9237 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9238 +#endif
9239 +
9240 /*
9241 * The following only work if pte_present() is true.
9242 * Undefined behaviour if not..
9243 */
9244 +static inline int pte_user(pte_t pte)
9245 +{
9246 + return pte_val(pte) & _PAGE_USER;
9247 +}
9248 +
9249 static inline int pte_dirty(pte_t pte)
9250 {
9251 return pte_flags(pte) & _PAGE_DIRTY;
9252 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9253 return pte_clear_flags(pte, _PAGE_RW);
9254 }
9255
9256 +static inline pte_t pte_mkread(pte_t pte)
9257 +{
9258 + return __pte(pte_val(pte) | _PAGE_USER);
9259 +}
9260 +
9261 static inline pte_t pte_mkexec(pte_t pte)
9262 {
9263 - return pte_clear_flags(pte, _PAGE_NX);
9264 +#ifdef CONFIG_X86_PAE
9265 + if (__supported_pte_mask & _PAGE_NX)
9266 + return pte_clear_flags(pte, _PAGE_NX);
9267 + else
9268 +#endif
9269 + return pte_set_flags(pte, _PAGE_USER);
9270 +}
9271 +
9272 +static inline pte_t pte_exprotect(pte_t pte)
9273 +{
9274 +#ifdef CONFIG_X86_PAE
9275 + if (__supported_pte_mask & _PAGE_NX)
9276 + return pte_set_flags(pte, _PAGE_NX);
9277 + else
9278 +#endif
9279 + return pte_clear_flags(pte, _PAGE_USER);
9280 }
9281
9282 static inline pte_t pte_mkdirty(pte_t pte)
9283 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9284 #endif
9285
9286 #ifndef __ASSEMBLY__
9287 +
9288 +#ifdef CONFIG_PAX_PER_CPU_PGD
9289 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9290 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9291 +{
9292 + return cpu_pgd[cpu];
9293 +}
9294 +#endif
9295 +
9296 #include <linux/mm_types.h>
9297
9298 static inline int pte_none(pte_t pte)
9299 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9300
9301 static inline int pgd_bad(pgd_t pgd)
9302 {
9303 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9304 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9305 }
9306
9307 static inline int pgd_none(pgd_t pgd)
9308 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9309 * pgd_offset() returns a (pgd_t *)
9310 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9311 */
9312 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9313 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9314 +
9315 +#ifdef CONFIG_PAX_PER_CPU_PGD
9316 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9317 +#endif
9318 +
9319 /*
9320 * a shortcut which implies the use of the kernel's pgd, instead
9321 * of a process's
9322 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9323 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9324 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9325
9326 +#ifdef CONFIG_X86_32
9327 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9328 +#else
9329 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9330 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9331 +
9332 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9333 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9334 +#else
9335 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9336 +#endif
9337 +
9338 +#endif
9339 +
9340 #ifndef __ASSEMBLY__
9341
9342 extern int direct_gbpages;
9343 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9344 * dst and src can be on the same page, but the range must not overlap,
9345 * and must not cross a page boundary.
9346 */
9347 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9348 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9349 {
9350 - memcpy(dst, src, count * sizeof(pgd_t));
9351 + pax_open_kernel();
9352 + while (count--)
9353 + *dst++ = *src++;
9354 + pax_close_kernel();
9355 }
9356
9357 +#ifdef CONFIG_PAX_PER_CPU_PGD
9358 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9359 +#endif
9360 +
9361 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9362 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9363 +#else
9364 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9365 +#endif
9366
9367 #include <asm-generic/pgtable.h>
9368 #endif /* __ASSEMBLY__ */
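
Note on the pgtable.h hunks above: native_pax_open_kernel()/native_pax_close_kernel() briefly clear the CR0.WP bit so the kernel can write to pages it otherwise keeps read-only under KERNEXEC; the rewritten clone_pgd_range() (and the native_set_pmd()/native_set_pgd() hunks earlier) bracket their writes with exactly this pair. The following is a standalone user-space sketch of the XOR bookkeeping only; fake_cr0 and its value are illustrative stand-ins for the real control register, which only kernel code can touch.

#include <stdio.h>

#define X86_CR0_WP (1UL << 16)                 /* write-protect bit in CR0 */

static unsigned long fake_cr0 = 0x80050033UL;  /* illustrative CR0 value, WP set */

/* mirrors native_pax_open_kernel(): clear WP, return the pre-toggle value */
static unsigned long sim_pax_open_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;  /* WP now clear       */
        fake_cr0 = cr0;                             /* "write_cr0(cr0)"   */
        return cr0 ^ X86_CR0_WP;                    /* old value, WP set  */
}

/* mirrors native_pax_close_kernel(): set WP again */
static unsigned long sim_pax_close_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
        fake_cr0 = cr0;
        return cr0 ^ X86_CR0_WP;
}

int main(void)
{
        sim_pax_open_kernel();
        printf("open:  0x%lx (WP clear, writes to ro pages allowed)\n", fake_cr0);
        sim_pax_close_kernel();
        printf("close: 0x%lx (WP restored)\n", fake_cr0);
        return 0;
}

In the real helpers the toggle is additionally fenced by preempt_disable()/preempt_enable_no_resched() and barrier(), so the window with WP clear cannot migrate to another CPU or be reordered around the protected write.
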
9369 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h
9370 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9371 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9372 @@ -16,12 +16,11 @@
9373 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9374 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9375 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9376 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9377 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9378 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9379 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9380 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9381 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9382 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9383 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9384 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9385
9386 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9387 @@ -39,7 +38,6 @@
9388 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9389 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9390 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9391 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9392 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9393 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9394 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9395 @@ -55,8 +53,10 @@
9396
9397 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9398 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9399 -#else
9400 +#elif defined(CONFIG_KMEMCHECK)
9401 #define _PAGE_NX (_AT(pteval_t, 0))
9402 +#else
9403 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9404 #endif
9405
9406 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9407 @@ -93,6 +93,9 @@
9408 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9409 _PAGE_ACCESSED)
9410
9411 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9412 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9413 +
9414 #define __PAGE_KERNEL_EXEC \
9415 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9416 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9417 @@ -103,8 +106,8 @@
9418 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9419 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9420 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9421 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9422 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9423 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9424 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9425 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9426 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9427 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9428 @@ -163,8 +166,8 @@
9429 * bits are combined, this will alow user to access the high address mapped
9430 * VDSO in the presence of CONFIG_COMPAT_VDSO
9431 */
9432 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9433 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9434 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9435 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9436 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9437 #endif
9438
9439 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9440 {
9441 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9442 }
9443 +#endif
9444
9445 +#if PAGETABLE_LEVELS == 3
9446 +#include <asm-generic/pgtable-nopud.h>
9447 +#endif
9448 +
9449 +#if PAGETABLE_LEVELS == 2
9450 +#include <asm-generic/pgtable-nopmd.h>
9451 +#endif
9452 +
9453 +#ifndef __ASSEMBLY__
9454 #if PAGETABLE_LEVELS > 3
9455 typedef struct { pudval_t pud; } pud_t;
9456
9457 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9458 return pud.pud;
9459 }
9460 #else
9461 -#include <asm-generic/pgtable-nopud.h>
9462 -
9463 static inline pudval_t native_pud_val(pud_t pud)
9464 {
9465 return native_pgd_val(pud.pgd);
9466 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9467 return pmd.pmd;
9468 }
9469 #else
9470 -#include <asm-generic/pgtable-nopmd.h>
9471 -
9472 static inline pmdval_t native_pmd_val(pmd_t pmd)
9473 {
9474 return native_pgd_val(pmd.pud.pgd);
9475 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9476
9477 extern pteval_t __supported_pte_mask;
9478 extern void set_nx(void);
9479 +
9480 +#ifdef CONFIG_X86_32
9481 +#ifdef CONFIG_X86_PAE
9482 extern int nx_enabled;
9483 +#else
9484 +#define nx_enabled (0)
9485 +#endif
9486 +#else
9487 +#define nx_enabled (1)
9488 +#endif
9489
9490 #define pgprot_writecombine pgprot_writecombine
9491 extern pgprot_t pgprot_writecombine(pgprot_t prot);
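
Note on the pgtable_types.h hunk above: the hardware NX bit (bit 63) is kept whenever PAE or 64-bit paging provides one; without PAE the patch reuses the kmemcheck "hidden" bit (bit 11) as a purely software NX marker, and only under CONFIG_KMEMCHECK does _PAGE_NX collapse to 0. The matching pte_mkexec()/pte_exprotect() helpers in the earlier pgtable.h hunk fall back to toggling _PAGE_USER when no usable NX bit exists. A minimal standalone check of the bit arithmetic, using just the _PAGE_BIT_* values from the hunk:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_BIT_HIDDEN 11   /* reused as a software NX marker without PAE */
#define _PAGE_BIT_NX     63   /* real hardware NX with PAE/64-bit paging    */

int main(void)
{
        uint64_t nx_hw = (uint64_t)1 << _PAGE_BIT_NX;
        uint64_t nx_sw = (uint64_t)1 << _PAGE_BIT_HIDDEN;

        printf("hardware _PAGE_NX: 0x%016llx\n", (unsigned long long)nx_hw);
        printf("software _PAGE_NX: 0x%016llx\n", (unsigned long long)nx_sw);
        return 0;
}
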
9492 diff -urNp linux-2.6.32.41/arch/x86/include/asm/processor.h linux-2.6.32.41/arch/x86/include/asm/processor.h
9493 --- linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9494 +++ linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9495 @@ -272,7 +272,7 @@ struct tss_struct {
9496
9497 } ____cacheline_aligned;
9498
9499 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9500 +extern struct tss_struct init_tss[NR_CPUS];
9501
9502 /*
9503 * Save the original ist values for checking stack pointers during debugging
9504 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9505 */
9506 #define TASK_SIZE PAGE_OFFSET
9507 #define TASK_SIZE_MAX TASK_SIZE
9508 +
9509 +#ifdef CONFIG_PAX_SEGMEXEC
9510 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9511 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9512 +#else
9513 #define STACK_TOP TASK_SIZE
9514 -#define STACK_TOP_MAX STACK_TOP
9515 +#endif
9516 +
9517 +#define STACK_TOP_MAX TASK_SIZE
9518
9519 #define INIT_THREAD { \
9520 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9521 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9522 .vm86_info = NULL, \
9523 .sysenter_cs = __KERNEL_CS, \
9524 .io_bitmap_ptr = NULL, \
9525 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9526 */
9527 #define INIT_TSS { \
9528 .x86_tss = { \
9529 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9530 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9531 .ss0 = __KERNEL_DS, \
9532 .ss1 = __KERNEL_CS, \
9533 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9534 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9535 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9536
9537 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9538 -#define KSTK_TOP(info) \
9539 -({ \
9540 - unsigned long *__ptr = (unsigned long *)(info); \
9541 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9542 -})
9543 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9544
9545 /*
9546 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9547 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9548 #define task_pt_regs(task) \
9549 ({ \
9550 struct pt_regs *__regs__; \
9551 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9552 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9553 __regs__ - 1; \
9554 })
9555
9556 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9557 /*
9558 * User space process size. 47bits minus one guard page.
9559 */
9560 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9561 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9562
9563 /* This decides where the kernel will search for a free chunk of vm
9564 * space during mmap's.
9565 */
9566 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9567 - 0xc0000000 : 0xFFFFe000)
9568 + 0xc0000000 : 0xFFFFf000)
9569
9570 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9571 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9572 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9573 #define STACK_TOP_MAX TASK_SIZE_MAX
9574
9575 #define INIT_THREAD { \
9576 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9577 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9578 }
9579
9580 #define INIT_TSS { \
9581 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9582 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9583 }
9584
9585 /*
9586 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9587 */
9588 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9589
9590 +#ifdef CONFIG_PAX_SEGMEXEC
9591 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9592 +#endif
9593 +
9594 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9595
9596 /* Get/set a process' ability to use the timestamp counter instruction */
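
Note on the processor.h hunks above: under CONFIG_PAX_SEGMEXEC the 32-bit user address space is split in half, so STACK_TOP and the new SEGMEXEC_TASK_UNMAPPED_BASE are derived from SEGMEXEC_TASK_SIZE = TASK_SIZE / 2, while the "-8"/"-16" adjustments to sp0 reserve a few bytes at the very top of the ring-0 stack so task_pt_regs() can be computed directly from thread.sp0. The standalone sketch below works out the SEGMEXEC numbers assuming the common 3G/1G split (PAGE_OFFSET = 0xC0000000); the values are illustrative only.

#include <stdio.h>

#define PAGE_OFFSET        0xC0000000UL          /* assumed 3G/1G split */
#define TASK_SIZE          PAGE_OFFSET
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
#define PAGE_ALIGN(x)      (((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
        printf("TASK_SIZE                   = 0x%08lx\n", TASK_SIZE);
        printf("SEGMEXEC_TASK_SIZE          = 0x%08lx\n", SEGMEXEC_TASK_SIZE);
        printf("TASK_UNMAPPED_BASE          = 0x%08lx\n", PAGE_ALIGN(TASK_SIZE / 3));
        printf("SEGMEXEC_TASK_UNMAPPED_BASE = 0x%08lx\n", PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3));
        return 0;
}
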
9597 diff -urNp linux-2.6.32.41/arch/x86/include/asm/ptrace.h linux-2.6.32.41/arch/x86/include/asm/ptrace.h
9598 --- linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9599 +++ linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9600 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9601 }
9602
9603 /*
9604 - * user_mode_vm(regs) determines whether a register set came from user mode.
9605 + * user_mode(regs) determines whether a register set came from user mode.
9606 * This is true if V8086 mode was enabled OR if the register set was from
9607 * protected mode with RPL-3 CS value. This tricky test checks that with
9608 * one comparison. Many places in the kernel can bypass this full check
9609 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9610 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9611 + * be used.
9612 */
9613 -static inline int user_mode(struct pt_regs *regs)
9614 +static inline int user_mode_novm(struct pt_regs *regs)
9615 {
9616 #ifdef CONFIG_X86_32
9617 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9618 #else
9619 - return !!(regs->cs & 3);
9620 + return !!(regs->cs & SEGMENT_RPL_MASK);
9621 #endif
9622 }
9623
9624 -static inline int user_mode_vm(struct pt_regs *regs)
9625 +static inline int user_mode(struct pt_regs *regs)
9626 {
9627 #ifdef CONFIG_X86_32
9628 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9629 USER_RPL;
9630 #else
9631 - return user_mode(regs);
9632 + return user_mode_novm(regs);
9633 #endif
9634 }
9635
9636 diff -urNp linux-2.6.32.41/arch/x86/include/asm/reboot.h linux-2.6.32.41/arch/x86/include/asm/reboot.h
9637 --- linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9638 +++ linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9639 @@ -6,19 +6,19 @@
9640 struct pt_regs;
9641
9642 struct machine_ops {
9643 - void (*restart)(char *cmd);
9644 - void (*halt)(void);
9645 - void (*power_off)(void);
9646 + void (* __noreturn restart)(char *cmd);
9647 + void (* __noreturn halt)(void);
9648 + void (* __noreturn power_off)(void);
9649 void (*shutdown)(void);
9650 void (*crash_shutdown)(struct pt_regs *);
9651 - void (*emergency_restart)(void);
9652 + void (* __noreturn emergency_restart)(void);
9653 };
9654
9655 extern struct machine_ops machine_ops;
9656
9657 void native_machine_crash_shutdown(struct pt_regs *regs);
9658 void native_machine_shutdown(void);
9659 -void machine_real_restart(const unsigned char *code, int length);
9660 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9661
9662 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9663 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9664 diff -urNp linux-2.6.32.41/arch/x86/include/asm/rwsem.h linux-2.6.32.41/arch/x86/include/asm/rwsem.h
9665 --- linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9666 +++ linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9667 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9668 {
9669 asm volatile("# beginning down_read\n\t"
9670 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9671 +
9672 +#ifdef CONFIG_PAX_REFCOUNT
9673 + "jno 0f\n"
9674 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9675 + "int $4\n0:\n"
9676 + _ASM_EXTABLE(0b, 0b)
9677 +#endif
9678 +
9679 /* adds 0x00000001, returns the old value */
9680 " jns 1f\n"
9681 " call call_rwsem_down_read_failed\n"
9682 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9683 "1:\n\t"
9684 " mov %1,%2\n\t"
9685 " add %3,%2\n\t"
9686 +
9687 +#ifdef CONFIG_PAX_REFCOUNT
9688 + "jno 0f\n"
9689 + "sub %3,%2\n"
9690 + "int $4\n0:\n"
9691 + _ASM_EXTABLE(0b, 0b)
9692 +#endif
9693 +
9694 " jle 2f\n\t"
9695 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9696 " jnz 1b\n\t"
9697 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9698 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9699 asm volatile("# beginning down_write\n\t"
9700 LOCK_PREFIX " xadd %1,(%2)\n\t"
9701 +
9702 +#ifdef CONFIG_PAX_REFCOUNT
9703 + "jno 0f\n"
9704 + "mov %1,(%2)\n"
9705 + "int $4\n0:\n"
9706 + _ASM_EXTABLE(0b, 0b)
9707 +#endif
9708 +
9709 /* subtract 0x0000ffff, returns the old value */
9710 " test %1,%1\n\t"
9711 /* was the count 0 before? */
9712 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9713 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9714 asm volatile("# beginning __up_read\n\t"
9715 LOCK_PREFIX " xadd %1,(%2)\n\t"
9716 +
9717 +#ifdef CONFIG_PAX_REFCOUNT
9718 + "jno 0f\n"
9719 + "mov %1,(%2)\n"
9720 + "int $4\n0:\n"
9721 + _ASM_EXTABLE(0b, 0b)
9722 +#endif
9723 +
9724 /* subtracts 1, returns the old value */
9725 " jns 1f\n\t"
9726 " call call_rwsem_wake\n"
9727 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9728 rwsem_count_t tmp;
9729 asm volatile("# beginning __up_write\n\t"
9730 LOCK_PREFIX " xadd %1,(%2)\n\t"
9731 +
9732 +#ifdef CONFIG_PAX_REFCOUNT
9733 + "jno 0f\n"
9734 + "mov %1,(%2)\n"
9735 + "int $4\n0:\n"
9736 + _ASM_EXTABLE(0b, 0b)
9737 +#endif
9738 +
9739 /* tries to transition
9740 0xffff0001 -> 0x00000000 */
9741 " jz 1f\n"
9742 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9743 {
9744 asm volatile("# beginning __downgrade_write\n\t"
9745 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9746 +
9747 +#ifdef CONFIG_PAX_REFCOUNT
9748 + "jno 0f\n"
9749 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9750 + "int $4\n0:\n"
9751 + _ASM_EXTABLE(0b, 0b)
9752 +#endif
9753 +
9754 /*
9755 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9756 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9757 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9758 static inline void rwsem_atomic_add(rwsem_count_t delta,
9759 struct rw_semaphore *sem)
9760 {
9761 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9762 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9763 +
9764 +#ifdef CONFIG_PAX_REFCOUNT
9765 + "jno 0f\n"
9766 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9767 + "int $4\n0:\n"
9768 + _ASM_EXTABLE(0b, 0b)
9769 +#endif
9770 +
9771 : "+m" (sem->count)
9772 : "er" (delta));
9773 }
9774 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9775 {
9776 rwsem_count_t tmp = delta;
9777
9778 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9779 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9780 +
9781 +#ifdef CONFIG_PAX_REFCOUNT
9782 + "jno 0f\n"
9783 + "mov %0,%1\n"
9784 + "int $4\n0:\n"
9785 + _ASM_EXTABLE(0b, 0b)
9786 +#endif
9787 +
9788 : "+r" (tmp), "+m" (sem->count)
9789 : : "memory");
9790
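
Note on the rwsem.h hunks above: every PAX_REFCOUNT addition follows the same pattern — perform the atomic operation, then "jno" skips the recovery path when the overflow flag is clear; on overflow the operation is undone and "int $4" raises the overflow exception for the kernel to handle. The standalone sketch below (x86 with GCC/Clang inline asm only) captures the same OF-flag idea, but reads the flag with "seto" instead of trapping so it can run as a normal program; it is an illustration, not the patch's code.

#include <stdio.h>

/* add "delta" to "*counter" and report whether the signed add overflowed */
static int add_report_overflow(int *counter, int delta)
{
        unsigned char of;

        asm volatile("addl %2, %0\n\t"
                     "seto %1"        /* capture OF; the kernel uses jno + int $4 */
                     : "+m" (*counter), "=q" (of)
                     : "ir" (delta)
                     : "cc");
        return of;
}

int main(void)
{
        int refcount = 0x7fffffff;    /* INT_MAX: the next increment overflows */

        if (add_report_overflow(&refcount, 1))
                printf("overflow detected, counter now %d\n", refcount);
        return 0;
}

The same jno/int $4 pattern reappears below in the spinlock.h rwlock hunks; only the underlying atomic instruction changes.
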
9791 diff -urNp linux-2.6.32.41/arch/x86/include/asm/segment.h linux-2.6.32.41/arch/x86/include/asm/segment.h
9792 --- linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9793 +++ linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9794 @@ -62,8 +62,8 @@
9795 * 26 - ESPFIX small SS
9796 * 27 - per-cpu [ offset to per-cpu data area ]
9797 * 28 - stack_canary-20 [ for stack protector ]
9798 - * 29 - unused
9799 - * 30 - unused
9800 + * 29 - PCI BIOS CS
9801 + * 30 - PCI BIOS DS
9802 * 31 - TSS for double fault handler
9803 */
9804 #define GDT_ENTRY_TLS_MIN 6
9805 @@ -77,6 +77,8 @@
9806
9807 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9808
9809 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9810 +
9811 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9812
9813 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9814 @@ -88,7 +90,7 @@
9815 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9816 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9817
9818 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9819 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9820 #ifdef CONFIG_SMP
9821 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9822 #else
9823 @@ -102,6 +104,12 @@
9824 #define __KERNEL_STACK_CANARY 0
9825 #endif
9826
9827 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9828 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9829 +
9830 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9831 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9832 +
9833 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9834
9835 /*
9836 @@ -139,7 +147,7 @@
9837 */
9838
9839 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9840 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9841 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9842
9843
9844 #else
9845 @@ -163,6 +171,8 @@
9846 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9847 #define __USER32_DS __USER_DS
9848
9849 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9850 +
9851 #define GDT_ENTRY_TSS 8 /* needs two entries */
9852 #define GDT_ENTRY_LDT 10 /* needs two entries */
9853 #define GDT_ENTRY_TLS_MIN 12
9854 @@ -183,6 +193,7 @@
9855 #endif
9856
9857 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9858 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9859 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9860 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9861 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
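
Note on the segment.h hunk above: the new descriptors follow the usual selector encoding, index * 8 with the low two bits holding the RPL. The hunk's own comment places the PCI BIOS segments at GDT entries 29 and 30, which matches the stock 32-bit GDT_ENTRY_KERNEL_BASE of 12 (assumed below, it is not shown in this hunk); the resulting selector values are illustrative.

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE        12   /* assumed from the stock 32-bit layout */
#define GDT_ENTRY_KERNEXEC_KERNEL_CS  4
#define GDT_ENTRY_PCIBIOS_CS         (GDT_ENTRY_KERNEL_BASE + 17)   /* entry 29 */
#define GDT_ENTRY_PCIBIOS_DS         (GDT_ENTRY_KERNEL_BASE + 18)   /* entry 30 */

int main(void)
{
        printf("__KERNEXEC_KERNEL_CS = 0x%02x\n", GDT_ENTRY_KERNEXEC_KERNEL_CS * 8);
        printf("__PCIBIOS_CS         = 0x%02x\n", GDT_ENTRY_PCIBIOS_CS * 8);
        printf("__PCIBIOS_DS         = 0x%02x\n", GDT_ENTRY_PCIBIOS_DS * 8);
        return 0;
}
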
9862 diff -urNp linux-2.6.32.41/arch/x86/include/asm/smp.h linux-2.6.32.41/arch/x86/include/asm/smp.h
9863 --- linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9864 +++ linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9865 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
9866 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9867 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9868 DECLARE_PER_CPU(u16, cpu_llc_id);
9869 -DECLARE_PER_CPU(int, cpu_number);
9870 +DECLARE_PER_CPU(unsigned int, cpu_number);
9871
9872 static inline struct cpumask *cpu_sibling_mask(int cpu)
9873 {
9874 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9875 extern int safe_smp_processor_id(void);
9876
9877 #elif defined(CONFIG_X86_64_SMP)
9878 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9879 -
9880 -#define stack_smp_processor_id() \
9881 -({ \
9882 - struct thread_info *ti; \
9883 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9884 - ti->cpu; \
9885 -})
9886 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9887 +#define stack_smp_processor_id() raw_smp_processor_id()
9888 #define safe_smp_processor_id() smp_processor_id()
9889
9890 #endif
9891 diff -urNp linux-2.6.32.41/arch/x86/include/asm/spinlock.h linux-2.6.32.41/arch/x86/include/asm/spinlock.h
9892 --- linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9893 +++ linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9894 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9895 static inline void __raw_read_lock(raw_rwlock_t *rw)
9896 {
9897 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9898 +
9899 +#ifdef CONFIG_PAX_REFCOUNT
9900 + "jno 0f\n"
9901 + LOCK_PREFIX " addl $1,(%0)\n"
9902 + "int $4\n0:\n"
9903 + _ASM_EXTABLE(0b, 0b)
9904 +#endif
9905 +
9906 "jns 1f\n"
9907 "call __read_lock_failed\n\t"
9908 "1:\n"
9909 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9910 static inline void __raw_write_lock(raw_rwlock_t *rw)
9911 {
9912 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9913 +
9914 +#ifdef CONFIG_PAX_REFCOUNT
9915 + "jno 0f\n"
9916 + LOCK_PREFIX " addl %1,(%0)\n"
9917 + "int $4\n0:\n"
9918 + _ASM_EXTABLE(0b, 0b)
9919 +#endif
9920 +
9921 "jz 1f\n"
9922 "call __write_lock_failed\n\t"
9923 "1:\n"
9924 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
9925
9926 static inline void __raw_read_unlock(raw_rwlock_t *rw)
9927 {
9928 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9929 + asm volatile(LOCK_PREFIX "incl %0\n"
9930 +
9931 +#ifdef CONFIG_PAX_REFCOUNT
9932 + "jno 0f\n"
9933 + LOCK_PREFIX "decl %0\n"
9934 + "int $4\n0:\n"
9935 + _ASM_EXTABLE(0b, 0b)
9936 +#endif
9937 +
9938 + :"+m" (rw->lock) : : "memory");
9939 }
9940
9941 static inline void __raw_write_unlock(raw_rwlock_t *rw)
9942 {
9943 - asm volatile(LOCK_PREFIX "addl %1, %0"
9944 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9945 +
9946 +#ifdef CONFIG_PAX_REFCOUNT
9947 + "jno 0f\n"
9948 + LOCK_PREFIX "subl %1, %0\n"
9949 + "int $4\n0:\n"
9950 + _ASM_EXTABLE(0b, 0b)
9951 +#endif
9952 +
9953 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9954 }
9955
9956 diff -urNp linux-2.6.32.41/arch/x86/include/asm/stackprotector.h linux-2.6.32.41/arch/x86/include/asm/stackprotector.h
9957 --- linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
9958 +++ linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
9959 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9960
9961 static inline void load_stack_canary_segment(void)
9962 {
9963 -#ifdef CONFIG_X86_32
9964 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9965 asm volatile ("mov %0, %%gs" : : "r" (0));
9966 #endif
9967 }
9968 diff -urNp linux-2.6.32.41/arch/x86/include/asm/system.h linux-2.6.32.41/arch/x86/include/asm/system.h
9969 --- linux-2.6.32.41/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
9970 +++ linux-2.6.32.41/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
9971 @@ -132,7 +132,7 @@ do { \
9972 "thread_return:\n\t" \
9973 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9974 __switch_canary \
9975 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
9976 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9977 "movq %%rax,%%rdi\n\t" \
9978 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9979 "jnz ret_from_fork\n\t" \
9980 @@ -143,7 +143,7 @@ do { \
9981 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9982 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9983 [_tif_fork] "i" (_TIF_FORK), \
9984 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9985 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
9986 [current_task] "m" (per_cpu_var(current_task)) \
9987 __switch_canary_iparam \
9988 : "memory", "cc" __EXTRA_CLOBBER)
9989 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9990 {
9991 unsigned long __limit;
9992 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9993 - return __limit + 1;
9994 + return __limit;
9995 }
9996
9997 static inline void native_clts(void)
9998 @@ -340,12 +340,12 @@ void enable_hlt(void);
9999
10000 void cpu_idle_wait(void);
10001
10002 -extern unsigned long arch_align_stack(unsigned long sp);
10003 +#define arch_align_stack(x) ((x) & ~0xfUL)
10004 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10005
10006 void default_idle(void);
10007
10008 -void stop_this_cpu(void *dummy);
10009 +void stop_this_cpu(void *dummy) __noreturn;
10010
10011 /*
10012 * Force strict CPU ordering.
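
Note on the system.h hunks above: arch_align_stack() becomes a pure 16-byte round-down, dropping the small random offset the out-of-line helper it replaces would subtract before aligning. A one-line standalone illustration of the new macro:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)   /* as defined in the hunk above */

int main(void)
{
        unsigned long sp = 0xbffff7b3UL;     /* illustrative stack pointer */

        printf("0x%lx -> 0x%lx (16-byte aligned, no random offset)\n",
               sp, arch_align_stack(sp));
        return 0;
}
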
10013 diff -urNp linux-2.6.32.41/arch/x86/include/asm/thread_info.h linux-2.6.32.41/arch/x86/include/asm/thread_info.h
10014 --- linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10015 +++ linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10016 @@ -10,6 +10,7 @@
10017 #include <linux/compiler.h>
10018 #include <asm/page.h>
10019 #include <asm/types.h>
10020 +#include <asm/percpu.h>
10021
10022 /*
10023 * low level task data that entry.S needs immediate access to
10024 @@ -24,7 +25,6 @@ struct exec_domain;
10025 #include <asm/atomic.h>
10026
10027 struct thread_info {
10028 - struct task_struct *task; /* main task structure */
10029 struct exec_domain *exec_domain; /* execution domain */
10030 __u32 flags; /* low level flags */
10031 __u32 status; /* thread synchronous flags */
10032 @@ -34,18 +34,12 @@ struct thread_info {
10033 mm_segment_t addr_limit;
10034 struct restart_block restart_block;
10035 void __user *sysenter_return;
10036 -#ifdef CONFIG_X86_32
10037 - unsigned long previous_esp; /* ESP of the previous stack in
10038 - case of nested (IRQ) stacks
10039 - */
10040 - __u8 supervisor_stack[0];
10041 -#endif
10042 + unsigned long lowest_stack;
10043 int uaccess_err;
10044 };
10045
10046 -#define INIT_THREAD_INFO(tsk) \
10047 +#define INIT_THREAD_INFO \
10048 { \
10049 - .task = &tsk, \
10050 .exec_domain = &default_exec_domain, \
10051 .flags = 0, \
10052 .cpu = 0, \
10053 @@ -56,7 +50,7 @@ struct thread_info {
10054 }, \
10055 }
10056
10057 -#define init_thread_info (init_thread_union.thread_info)
10058 +#define init_thread_info (init_thread_union.stack)
10059 #define init_stack (init_thread_union.stack)
10060
10061 #else /* !__ASSEMBLY__ */
10062 @@ -163,6 +157,23 @@ struct thread_info {
10063 #define alloc_thread_info(tsk) \
10064 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10065
10066 +#ifdef __ASSEMBLY__
10067 +/* how to get the thread information struct from ASM */
10068 +#define GET_THREAD_INFO(reg) \
10069 + mov PER_CPU_VAR(current_tinfo), reg
10070 +
10071 +/* use this one if reg already contains %esp */
10072 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10073 +#else
10074 +/* how to get the thread information struct from C */
10075 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10076 +
10077 +static __always_inline struct thread_info *current_thread_info(void)
10078 +{
10079 + return percpu_read_stable(current_tinfo);
10080 +}
10081 +#endif
10082 +
10083 #ifdef CONFIG_X86_32
10084
10085 #define STACK_WARN (THREAD_SIZE/8)
10086 @@ -173,35 +184,13 @@ struct thread_info {
10087 */
10088 #ifndef __ASSEMBLY__
10089
10090 -
10091 /* how to get the current stack pointer from C */
10092 register unsigned long current_stack_pointer asm("esp") __used;
10093
10094 -/* how to get the thread information struct from C */
10095 -static inline struct thread_info *current_thread_info(void)
10096 -{
10097 - return (struct thread_info *)
10098 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10099 -}
10100 -
10101 -#else /* !__ASSEMBLY__ */
10102 -
10103 -/* how to get the thread information struct from ASM */
10104 -#define GET_THREAD_INFO(reg) \
10105 - movl $-THREAD_SIZE, reg; \
10106 - andl %esp, reg
10107 -
10108 -/* use this one if reg already contains %esp */
10109 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10110 - andl $-THREAD_SIZE, reg
10111 -
10112 #endif
10113
10114 #else /* X86_32 */
10115
10116 -#include <asm/percpu.h>
10117 -#define KERNEL_STACK_OFFSET (5*8)
10118 -
10119 /*
10120 * macros/functions for gaining access to the thread information structure
10121 * preempt_count needs to be 1 initially, until the scheduler is functional.
10122 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10123 #ifndef __ASSEMBLY__
10124 DECLARE_PER_CPU(unsigned long, kernel_stack);
10125
10126 -static inline struct thread_info *current_thread_info(void)
10127 -{
10128 - struct thread_info *ti;
10129 - ti = (void *)(percpu_read_stable(kernel_stack) +
10130 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10131 - return ti;
10132 -}
10133 -
10134 -#else /* !__ASSEMBLY__ */
10135 -
10136 -/* how to get the thread information struct from ASM */
10137 -#define GET_THREAD_INFO(reg) \
10138 - movq PER_CPU_VAR(kernel_stack),reg ; \
10139 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10140 -
10141 +/* how to get the current stack pointer from C */
10142 +register unsigned long current_stack_pointer asm("rsp") __used;
10143 #endif
10144
10145 #endif /* !X86_32 */
10146 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10147 extern void free_thread_info(struct thread_info *ti);
10148 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10149 #define arch_task_cache_init arch_task_cache_init
10150 +
10151 +#define __HAVE_THREAD_FUNCTIONS
10152 +#define task_thread_info(task) (&(task)->tinfo)
10153 +#define task_stack_page(task) ((task)->stack)
10154 +#define setup_thread_stack(p, org) do {} while (0)
10155 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10156 +
10157 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10158 +extern struct task_struct *alloc_task_struct(void);
10159 +extern void free_task_struct(struct task_struct *);
10160 +
10161 #endif
10162 #endif /* _ASM_X86_THREAD_INFO_H */
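
Note on the thread_info.h hunks above: the classic way of finding struct thread_info by masking the stack pointer is replaced with a per-CPU current_tinfo pointer (and the task back-pointer is dropped in favour of __HAVE_THREAD_FUNCTIONS), which is what allows thread_info to live outside the stack pages. The removed 32-bit helper computed it as reproduced below; the sketch only shows that masking arithmetic, with an illustrative THREAD_SIZE of 8 KiB.

#include <stdio.h>

#define THREAD_SIZE 8192UL    /* illustrative: the usual 32-bit two-page stack */

int main(void)
{
        unsigned long esp = 0xc15f3e40UL;              /* some in-stack address       */
        unsigned long ti  = esp & ~(THREAD_SIZE - 1);  /* old current_thread_info()   */

        printf("esp = 0x%08lx -> thread_info at 0x%08lx\n", esp, ti);
        return 0;
}
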
10163 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h
10164 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10165 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10166 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10167 static __always_inline unsigned long __must_check
10168 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10169 {
10170 + pax_track_stack();
10171 +
10172 + if ((long)n < 0)
10173 + return n;
10174 +
10175 if (__builtin_constant_p(n)) {
10176 unsigned long ret;
10177
10178 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10179 return ret;
10180 }
10181 }
10182 + if (!__builtin_constant_p(n))
10183 + check_object_size(from, n, true);
10184 return __copy_to_user_ll(to, from, n);
10185 }
10186
10187 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10188 __copy_to_user(void __user *to, const void *from, unsigned long n)
10189 {
10190 might_fault();
10191 +
10192 return __copy_to_user_inatomic(to, from, n);
10193 }
10194
10195 static __always_inline unsigned long
10196 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10197 {
10198 + if ((long)n < 0)
10199 + return n;
10200 +
10201 /* Avoid zeroing the tail if the copy fails..
10202 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10203 * but as the zeroing behaviour is only significant when n is not
10204 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10205 __copy_from_user(void *to, const void __user *from, unsigned long n)
10206 {
10207 might_fault();
10208 +
10209 + pax_track_stack();
10210 +
10211 + if ((long)n < 0)
10212 + return n;
10213 +
10214 if (__builtin_constant_p(n)) {
10215 unsigned long ret;
10216
10217 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10218 return ret;
10219 }
10220 }
10221 + if (!__builtin_constant_p(n))
10222 + check_object_size(to, n, false);
10223 return __copy_from_user_ll(to, from, n);
10224 }
10225
10226 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10227 const void __user *from, unsigned long n)
10228 {
10229 might_fault();
10230 +
10231 + if ((long)n < 0)
10232 + return n;
10233 +
10234 if (__builtin_constant_p(n)) {
10235 unsigned long ret;
10236
10237 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10238 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10239 unsigned long n)
10240 {
10241 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10242 + if ((long)n < 0)
10243 + return n;
10244 +
10245 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10246 +}
10247 +
10248 +/**
10249 + * copy_to_user: - Copy a block of data into user space.
10250 + * @to: Destination address, in user space.
10251 + * @from: Source address, in kernel space.
10252 + * @n: Number of bytes to copy.
10253 + *
10254 + * Context: User context only. This function may sleep.
10255 + *
10256 + * Copy data from kernel space to user space.
10257 + *
10258 + * Returns number of bytes that could not be copied.
10259 + * On success, this will be zero.
10260 + */
10261 +static __always_inline unsigned long __must_check
10262 +copy_to_user(void __user *to, const void *from, unsigned long n)
10263 +{
10264 + if (access_ok(VERIFY_WRITE, to, n))
10265 + n = __copy_to_user(to, from, n);
10266 + return n;
10267 +}
10268 +
10269 +/**
10270 + * copy_from_user: - Copy a block of data from user space.
10271 + * @to: Destination address, in kernel space.
10272 + * @from: Source address, in user space.
10273 + * @n: Number of bytes to copy.
10274 + *
10275 + * Context: User context only. This function may sleep.
10276 + *
10277 + * Copy data from user space to kernel space.
10278 + *
10279 + * Returns number of bytes that could not be copied.
10280 + * On success, this will be zero.
10281 + *
10282 + * If some data could not be copied, this function will pad the copied
10283 + * data to the requested size using zero bytes.
10284 + */
10285 +static __always_inline unsigned long __must_check
10286 +copy_from_user(void *to, const void __user *from, unsigned long n)
10287 +{
10288 + if (access_ok(VERIFY_READ, from, n))
10289 + n = __copy_from_user(to, from, n);
10290 + else if ((long)n > 0) {
10291 + if (!__builtin_constant_p(n))
10292 + check_object_size(to, n, false);
10293 + memset(to, 0, n);
10294 + }
10295 + return n;
10296 }
10297
10298 -unsigned long __must_check copy_to_user(void __user *to,
10299 - const void *from, unsigned long n);
10300 -unsigned long __must_check copy_from_user(void *to,
10301 - const void __user *from,
10302 - unsigned long n);
10303 long __must_check strncpy_from_user(char *dst, const char __user *src,
10304 long count);
10305 long __must_check __strncpy_from_user(char *dst,
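
Note on the uaccess_32.h hunks above: the "(long)n < 0" checks added throughout these copy helpers catch length values whose sign bit is set — a negative length that reached the unsigned parameter would otherwise be treated as an enormous copy. The check_object_size() calls next to them feed non-constant copies into PaX's object bounds checking. A small standalone illustration of the failure mode the sign guard rejects:

#include <stdio.h>

/* mimics the guard added to __copy_from_user() and friends above */
static unsigned long guarded_copy(unsigned long n)
{
        if ((long)n < 0)
                return n;     /* refuse: report every byte as "not copied" */
        /* ... the real copy would happen here ... */
        return 0;
}

int main(void)
{
        long bogus_len = -1;                    /* e.g. an unchecked error code */
        unsigned long n = (unsigned long)bogus_len;

        if (guarded_copy(n) == n)
                printf("rejected %lu-byte request (sign bit set)\n", n);
        return 0;
}
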
10306 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h
10307 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10308 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10309 @@ -9,6 +9,9 @@
10310 #include <linux/prefetch.h>
10311 #include <linux/lockdep.h>
10312 #include <asm/page.h>
10313 +#include <asm/pgtable.h>
10314 +
10315 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10316
10317 /*
10318 * Copy To/From Userspace
10319 @@ -19,113 +22,203 @@ __must_check unsigned long
10320 copy_user_generic(void *to, const void *from, unsigned len);
10321
10322 __must_check unsigned long
10323 -copy_to_user(void __user *to, const void *from, unsigned len);
10324 -__must_check unsigned long
10325 -copy_from_user(void *to, const void __user *from, unsigned len);
10326 -__must_check unsigned long
10327 copy_in_user(void __user *to, const void __user *from, unsigned len);
10328
10329 static __always_inline __must_check
10330 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10331 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10332 {
10333 - int ret = 0;
10334 + unsigned ret = 0;
10335
10336 might_fault();
10337 - if (!__builtin_constant_p(size))
10338 - return copy_user_generic(dst, (__force void *)src, size);
10339 +
10340 + if ((int)size < 0)
10341 + return size;
10342 +
10343 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10344 + if (!__access_ok(VERIFY_READ, src, size))
10345 + return size;
10346 +#endif
10347 +
10348 + if (!__builtin_constant_p(size)) {
10349 + check_object_size(dst, size, false);
10350 +
10351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10352 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10353 + src += PAX_USER_SHADOW_BASE;
10354 +#endif
10355 +
10356 + return copy_user_generic(dst, (__force const void *)src, size);
10357 + }
10358 switch (size) {
10359 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10360 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10361 ret, "b", "b", "=q", 1);
10362 return ret;
10363 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10364 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10365 ret, "w", "w", "=r", 2);
10366 return ret;
10367 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10368 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10369 ret, "l", "k", "=r", 4);
10370 return ret;
10371 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10372 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10373 ret, "q", "", "=r", 8);
10374 return ret;
10375 case 10:
10376 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10377 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10378 ret, "q", "", "=r", 10);
10379 if (unlikely(ret))
10380 return ret;
10381 __get_user_asm(*(u16 *)(8 + (char *)dst),
10382 - (u16 __user *)(8 + (char __user *)src),
10383 + (const u16 __user *)(8 + (const char __user *)src),
10384 ret, "w", "w", "=r", 2);
10385 return ret;
10386 case 16:
10387 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10388 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10389 ret, "q", "", "=r", 16);
10390 if (unlikely(ret))
10391 return ret;
10392 __get_user_asm(*(u64 *)(8 + (char *)dst),
10393 - (u64 __user *)(8 + (char __user *)src),
10394 + (const u64 __user *)(8 + (const char __user *)src),
10395 ret, "q", "", "=r", 8);
10396 return ret;
10397 default:
10398 - return copy_user_generic(dst, (__force void *)src, size);
10399 +
10400 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10401 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10402 + src += PAX_USER_SHADOW_BASE;
10403 +#endif
10404 +
10405 + return copy_user_generic(dst, (__force const void *)src, size);
10406 }
10407 }
10408
10409 static __always_inline __must_check
10410 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10411 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10412 {
10413 - int ret = 0;
10414 + unsigned ret = 0;
10415
10416 might_fault();
10417 - if (!__builtin_constant_p(size))
10418 +
10419 + pax_track_stack();
10420 +
10421 + if ((int)size < 0)
10422 + return size;
10423 +
10424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10425 + if (!__access_ok(VERIFY_WRITE, dst, size))
10426 + return size;
10427 +#endif
10428 +
10429 + if (!__builtin_constant_p(size)) {
10430 + check_object_size(src, size, true);
10431 +
10432 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10433 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10434 + dst += PAX_USER_SHADOW_BASE;
10435 +#endif
10436 +
10437 return copy_user_generic((__force void *)dst, src, size);
10438 + }
10439 switch (size) {
10440 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10441 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10442 ret, "b", "b", "iq", 1);
10443 return ret;
10444 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10445 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10446 ret, "w", "w", "ir", 2);
10447 return ret;
10448 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10449 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10450 ret, "l", "k", "ir", 4);
10451 return ret;
10452 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10453 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10454 ret, "q", "", "er", 8);
10455 return ret;
10456 case 10:
10457 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10458 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10459 ret, "q", "", "er", 10);
10460 if (unlikely(ret))
10461 return ret;
10462 asm("":::"memory");
10463 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10464 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10465 ret, "w", "w", "ir", 2);
10466 return ret;
10467 case 16:
10468 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10469 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10470 ret, "q", "", "er", 16);
10471 if (unlikely(ret))
10472 return ret;
10473 asm("":::"memory");
10474 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10475 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10476 ret, "q", "", "er", 8);
10477 return ret;
10478 default:
10479 +
10480 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10481 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10482 + dst += PAX_USER_SHADOW_BASE;
10483 +#endif
10484 +
10485 return copy_user_generic((__force void *)dst, src, size);
10486 }
10487 }
10488
10489 static __always_inline __must_check
10490 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10491 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10492 +{
10493 + if (access_ok(VERIFY_WRITE, to, len))
10494 + len = __copy_to_user(to, from, len);
10495 + return len;
10496 +}
10497 +
10498 +static __always_inline __must_check
10499 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10500 +{
10501 + if ((int)len < 0)
10502 + return len;
10503 +
10504 + if (access_ok(VERIFY_READ, from, len))
10505 + len = __copy_from_user(to, from, len);
10506 + else if ((int)len > 0) {
10507 + if (!__builtin_constant_p(len))
10508 + check_object_size(to, len, false);
10509 + memset(to, 0, len);
10510 + }
10511 + return len;
10512 +}
10513 +
10514 +static __always_inline __must_check
10515 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10516 {
10517 - int ret = 0;
10518 + unsigned ret = 0;
10519
10520 might_fault();
10521 - if (!__builtin_constant_p(size))
10522 +
10523 + pax_track_stack();
10524 +
10525 + if ((int)size < 0)
10526 + return size;
10527 +
10528 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10529 + if (!__access_ok(VERIFY_READ, src, size))
10530 + return size;
10531 + if (!__access_ok(VERIFY_WRITE, dst, size))
10532 + return size;
10533 +#endif
10534 +
10535 + if (!__builtin_constant_p(size)) {
10536 +
10537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10538 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10539 + src += PAX_USER_SHADOW_BASE;
10540 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10541 + dst += PAX_USER_SHADOW_BASE;
10542 +#endif
10543 +
10544 return copy_user_generic((__force void *)dst,
10545 - (__force void *)src, size);
10546 + (__force const void *)src, size);
10547 + }
10548 switch (size) {
10549 case 1: {
10550 u8 tmp;
10551 - __get_user_asm(tmp, (u8 __user *)src,
10552 + __get_user_asm(tmp, (const u8 __user *)src,
10553 ret, "b", "b", "=q", 1);
10554 if (likely(!ret))
10555 __put_user_asm(tmp, (u8 __user *)dst,
10556 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10557 }
10558 case 2: {
10559 u16 tmp;
10560 - __get_user_asm(tmp, (u16 __user *)src,
10561 + __get_user_asm(tmp, (const u16 __user *)src,
10562 ret, "w", "w", "=r", 2);
10563 if (likely(!ret))
10564 __put_user_asm(tmp, (u16 __user *)dst,
10565 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10566
10567 case 4: {
10568 u32 tmp;
10569 - __get_user_asm(tmp, (u32 __user *)src,
10570 + __get_user_asm(tmp, (const u32 __user *)src,
10571 ret, "l", "k", "=r", 4);
10572 if (likely(!ret))
10573 __put_user_asm(tmp, (u32 __user *)dst,
10574 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10575 }
10576 case 8: {
10577 u64 tmp;
10578 - __get_user_asm(tmp, (u64 __user *)src,
10579 + __get_user_asm(tmp, (const u64 __user *)src,
10580 ret, "q", "", "=r", 8);
10581 if (likely(!ret))
10582 __put_user_asm(tmp, (u64 __user *)dst,
10583 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10584 return ret;
10585 }
10586 default:
10587 +
10588 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10589 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10590 + src += PAX_USER_SHADOW_BASE;
10591 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10592 + dst += PAX_USER_SHADOW_BASE;
10593 +#endif
10594 +
10595 return copy_user_generic((__force void *)dst,
10596 - (__force void *)src, size);
10597 + (__force const void *)src, size);
10598 }
10599 }
10600
10601 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10602 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10603 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10604
10605 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10606 - unsigned size);
10607 +static __must_check __always_inline unsigned long
10608 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10609 +{
10610 + pax_track_stack();
10611 +
10612 + if ((int)size < 0)
10613 + return size;
10614
10615 -static __must_check __always_inline int
10616 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10617 + if (!__access_ok(VERIFY_READ, src, size))
10618 + return size;
10619 +
10620 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10621 + src += PAX_USER_SHADOW_BASE;
10622 +#endif
10623 +
10624 + return copy_user_generic(dst, (__force const void *)src, size);
10625 +}
10626 +
10627 +static __must_check __always_inline unsigned long
10628 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10629 {
10630 + if ((int)size < 0)
10631 + return size;
10632 +
10633 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10634 + if (!__access_ok(VERIFY_WRITE, dst, size))
10635 + return size;
10636 +
10637 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10638 + dst += PAX_USER_SHADOW_BASE;
10639 +#endif
10640 +
10641 return copy_user_generic((__force void *)dst, src, size);
10642 }
10643
10644 -extern long __copy_user_nocache(void *dst, const void __user *src,
10645 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10646 unsigned size, int zerorest);
10647
10648 -static inline int
10649 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10650 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10651 {
10652 might_sleep();
10653 +
10654 + if ((int)size < 0)
10655 + return size;
10656 +
10657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10658 + if (!__access_ok(VERIFY_READ, src, size))
10659 + return size;
10660 +#endif
10661 +
10662 return __copy_user_nocache(dst, src, size, 1);
10663 }
10664
10665 -static inline int
10666 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10667 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10668 unsigned size)
10669 {
10670 + if ((int)size < 0)
10671 + return size;
10672 +
10673 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10674 + if (!__access_ok(VERIFY_READ, src, size))
10675 + return size;
10676 +#endif
10677 +
10678 return __copy_user_nocache(dst, src, size, 0);
10679 }
10680
10681 -unsigned long
10682 +extern unsigned long
10683 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10684
10685 #endif /* _ASM_X86_UACCESS_64_H */
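
Note on the uaccess_64.h hunks above: under CONFIG_PAX_MEMORY_UDEREF every userland pointer handed to these copy routines is first shifted up by PAX_USER_SHADOW_BASE, so the legitimate accesses go through a shadow mapping of user space; the intent is that the plain low-address mapping can then be kept inaccessible to kernel code, making stray dereferences of raw user pointers fault instead of silently succeeding. PAX_USER_SHADOW_BASE is 1 << TASK_SIZE_MAX_SHIFT (see the pgtable.h hunk); the value below assumes the usual 47-bit user address space and is illustrative.

#include <stdio.h>
#include <stdint.h>

/* assumption: TASK_SIZE_MAX_SHIFT == 47, i.e. the usual 47-bit user VA space */
#define PAX_USER_SHADOW_BASE ((uint64_t)1 << 47)

static uint64_t uderef_remap(uint64_t uaddr)
{
        if (uaddr < PAX_USER_SHADOW_BASE)    /* same test as in the hunks above */
                uaddr += PAX_USER_SHADOW_BASE;
        return uaddr;
}

int main(void)
{
        uint64_t p = 0x00007f12345678f0ULL;  /* illustrative user pointer */

        printf("user 0x%016llx -> shadow 0x%016llx\n",
               (unsigned long long)p, (unsigned long long)uderef_remap(p));
        return 0;
}
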
10686 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess.h linux-2.6.32.41/arch/x86/include/asm/uaccess.h
10687 --- linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
10688 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
10689 @@ -8,12 +8,15 @@
10690 #include <linux/thread_info.h>
10691 #include <linux/prefetch.h>
10692 #include <linux/string.h>
10693 +#include <linux/sched.h>
10694 #include <asm/asm.h>
10695 #include <asm/page.h>
10696
10697 #define VERIFY_READ 0
10698 #define VERIFY_WRITE 1
10699
10700 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10701 +
10702 /*
10703 * The fs value determines whether argument validity checking should be
10704 * performed or not. If get_fs() == USER_DS, checking is performed, with
10705 @@ -29,7 +32,12 @@
10706
10707 #define get_ds() (KERNEL_DS)
10708 #define get_fs() (current_thread_info()->addr_limit)
10709 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10710 +void __set_fs(mm_segment_t x);
10711 +void set_fs(mm_segment_t x);
10712 +#else
10713 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10714 +#endif
10715
10716 #define segment_eq(a, b) ((a).seg == (b).seg)
10717
10718 @@ -77,7 +85,33 @@
10719 * checks that the pointer is in the user space range - after calling
10720 * this function, memory access functions may still return -EFAULT.
10721 */
10722 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10723 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10724 +#define access_ok(type, addr, size) \
10725 +({ \
10726 + long __size = size; \
10727 + unsigned long __addr = (unsigned long)addr; \
10728 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10729 + unsigned long __end_ao = __addr + __size - 1; \
10730 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10731 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10732 + while(__addr_ao <= __end_ao) { \
10733 + char __c_ao; \
10734 + __addr_ao += PAGE_SIZE; \
10735 + if (__size > PAGE_SIZE) \
10736 + cond_resched(); \
10737 + if (__get_user(__c_ao, (char __user *)__addr)) \
10738 + break; \
10739 + if (type != VERIFY_WRITE) { \
10740 + __addr = __addr_ao; \
10741 + continue; \
10742 + } \
10743 + if (__put_user(__c_ao, (char __user *)__addr)) \
10744 + break; \
10745 + __addr = __addr_ao; \
10746 + } \
10747 + } \
10748 + __ret_ao; \
10749 +})
10750
10751 /*
10752 * The exception table consists of pairs of addresses: the first is the
10753 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10754 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10755 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10756
10757 -
10758 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10759 +#define __copyuser_seg "gs;"
10760 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10761 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10762 +#else
10763 +#define __copyuser_seg
10764 +#define __COPYUSER_SET_ES
10765 +#define __COPYUSER_RESTORE_ES
10766 +#endif
10767
10768 #ifdef CONFIG_X86_32
10769 #define __put_user_asm_u64(x, addr, err, errret) \
10770 - asm volatile("1: movl %%eax,0(%2)\n" \
10771 - "2: movl %%edx,4(%2)\n" \
10772 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10773 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10774 "3:\n" \
10775 ".section .fixup,\"ax\"\n" \
10776 "4: movl %3,%0\n" \
10777 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10778 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10779
10780 #define __put_user_asm_ex_u64(x, addr) \
10781 - asm volatile("1: movl %%eax,0(%1)\n" \
10782 - "2: movl %%edx,4(%1)\n" \
10783 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10784 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10785 "3:\n" \
10786 _ASM_EXTABLE(1b, 2b - 1b) \
10787 _ASM_EXTABLE(2b, 3b - 2b) \
10788 @@ -374,7 +416,7 @@ do { \
10789 } while (0)
10790
10791 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10792 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10793 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10794 "2:\n" \
10795 ".section .fixup,\"ax\"\n" \
10796 "3: mov %3,%0\n" \
10797 @@ -382,7 +424,7 @@ do { \
10798 " jmp 2b\n" \
10799 ".previous\n" \
10800 _ASM_EXTABLE(1b, 3b) \
10801 - : "=r" (err), ltype(x) \
10802 + : "=r" (err), ltype (x) \
10803 : "m" (__m(addr)), "i" (errret), "0" (err))
10804
10805 #define __get_user_size_ex(x, ptr, size) \
10806 @@ -407,7 +449,7 @@ do { \
10807 } while (0)
10808
10809 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10810 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10811 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10812 "2:\n" \
10813 _ASM_EXTABLE(1b, 2b - 1b) \
10814 : ltype(x) : "m" (__m(addr)))
10815 @@ -424,13 +466,24 @@ do { \
10816 int __gu_err; \
10817 unsigned long __gu_val; \
10818 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10819 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10820 + (x) = (__typeof__(*(ptr)))__gu_val; \
10821 __gu_err; \
10822 })
10823
10824 /* FIXME: this hack is definitely wrong -AK */
10825 struct __large_struct { unsigned long buf[100]; };
10826 -#define __m(x) (*(struct __large_struct __user *)(x))
10827 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10828 +#define ____m(x) \
10829 +({ \
10830 + unsigned long ____x = (unsigned long)(x); \
10831 + if (____x < PAX_USER_SHADOW_BASE) \
10832 + ____x += PAX_USER_SHADOW_BASE; \
10833 + (void __user *)____x; \
10834 +})
10835 +#else
10836 +#define ____m(x) (x)
10837 +#endif
10838 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10839
10840 /*
10841 * Tell gcc we read from memory instead of writing: this is because
10842 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10843 * aliasing issues.
10844 */
10845 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10846 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10847 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10848 "2:\n" \
10849 ".section .fixup,\"ax\"\n" \
10850 "3: mov %3,%0\n" \
10851 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10852 ".previous\n" \
10853 _ASM_EXTABLE(1b, 3b) \
10854 : "=r"(err) \
10855 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10856 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10857
10858 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10859 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10860 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10861 "2:\n" \
10862 _ASM_EXTABLE(1b, 2b - 1b) \
10863 : : ltype(x), "m" (__m(addr)))
10864 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10865 * On error, the variable @x is set to zero.
10866 */
10867
10868 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10869 +#define __get_user(x, ptr) get_user((x), (ptr))
10870 +#else
10871 #define __get_user(x, ptr) \
10872 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10873 +#endif
10874
10875 /**
10876 * __put_user: - Write a simple value into user space, with less checking.
10877 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10878 * Returns zero on success, or -EFAULT on error.
10879 */
10880
10881 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10882 +#define __put_user(x, ptr) put_user((x), (ptr))
10883 +#else
10884 #define __put_user(x, ptr) \
10885 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10886 +#endif
10887
10888 #define __get_user_unaligned __get_user
10889 #define __put_user_unaligned __put_user
10890 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10891 #define get_user_ex(x, ptr) do { \
10892 unsigned long __gue_val; \
10893 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10894 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10895 + (x) = (__typeof__(*(ptr)))__gue_val; \
10896 } while (0)
10897
10898 #ifdef CONFIG_X86_WP_WORKS_OK
10899 @@ -567,6 +628,7 @@ extern struct movsl_mask {
10900
10901 #define ARCH_HAS_NOCACHE_UACCESS 1
10902
10903 +#define ARCH_HAS_SORT_EXTABLE
10904 #ifdef CONFIG_X86_32
10905 # include "uaccess_32.h"
10906 #else
10907 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vgtod.h linux-2.6.32.41/arch/x86/include/asm/vgtod.h
10908 --- linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10909 +++ linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10910 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10911 int sysctl_enabled;
10912 struct timezone sys_tz;
10913 struct { /* extract of a clocksource struct */
10914 + char name[8];
10915 cycle_t (*vread)(void);
10916 cycle_t cycle_last;
10917 cycle_t mask;
10918 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vmi.h linux-2.6.32.41/arch/x86/include/asm/vmi.h
10919 --- linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
10920 +++ linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
10921 @@ -191,6 +191,7 @@ struct vrom_header {
10922 u8 reserved[96]; /* Reserved for headers */
10923 char vmi_init[8]; /* VMI_Init jump point */
10924 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
10925 + char rom_data[8048]; /* rest of the option ROM */
10926 } __attribute__((packed));
10927
10928 struct pnp_header {
10929 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vsyscall.h linux-2.6.32.41/arch/x86/include/asm/vsyscall.h
10930 --- linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
10931 +++ linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
10932 @@ -15,9 +15,10 @@ enum vsyscall_num {
10933
10934 #ifdef __KERNEL__
10935 #include <linux/seqlock.h>
10936 +#include <linux/getcpu.h>
10937 +#include <linux/time.h>
10938
10939 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
10940 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
10941
10942 /* Definitions for CONFIG_GENERIC_TIME definitions */
10943 #define __section_vsyscall_gtod_data __attribute__ \
10944 @@ -31,7 +32,6 @@ enum vsyscall_num {
10945 #define VGETCPU_LSL 2
10946
10947 extern int __vgetcpu_mode;
10948 -extern volatile unsigned long __jiffies;
10949
10950 /* kernel space (writeable) */
10951 extern int vgetcpu_mode;
10952 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
10953
10954 extern void map_vsyscall(void);
10955
10956 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
10957 +extern time_t vtime(time_t *t);
10958 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
10959 #endif /* __KERNEL__ */
10960
10961 #endif /* _ASM_X86_VSYSCALL_H */
10962 diff -urNp linux-2.6.32.41/arch/x86/include/asm/xsave.h linux-2.6.32.41/arch/x86/include/asm/xsave.h
10963 --- linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
10964 +++ linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
10965 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
10966 static inline int xsave_user(struct xsave_struct __user *buf)
10967 {
10968 int err;
10969 +
10970 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10971 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10972 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10973 +#endif
10974 +
10975 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
10976 "2:\n"
10977 ".section .fixup,\"ax\"\n"
10978 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
10979 u32 lmask = mask;
10980 u32 hmask = mask >> 32;
10981
10982 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10983 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10984 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10985 +#endif
10986 +
10987 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10988 "2:\n"
10989 ".section .fixup,\"ax\"\n"
10990 diff -urNp linux-2.6.32.41/arch/x86/Kconfig linux-2.6.32.41/arch/x86/Kconfig
10991 --- linux-2.6.32.41/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
10992 +++ linux-2.6.32.41/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
10993 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
10994
10995 config X86_32_LAZY_GS
10996 def_bool y
10997 - depends on X86_32 && !CC_STACKPROTECTOR
10998 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10999
11000 config KTIME_SCALAR
11001 def_bool X86_32
11002 @@ -1008,7 +1008,7 @@ choice
11003
11004 config NOHIGHMEM
11005 bool "off"
11006 - depends on !X86_NUMAQ
11007 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11008 ---help---
11009 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11010 However, the address space of 32-bit x86 processors is only 4
11011 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11012
11013 config HIGHMEM4G
11014 bool "4GB"
11015 - depends on !X86_NUMAQ
11016 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11017 ---help---
11018 Select this if you have a 32-bit processor and between 1 and 4
11019 gigabytes of physical RAM.
11020 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11021 hex
11022 default 0xB0000000 if VMSPLIT_3G_OPT
11023 default 0x80000000 if VMSPLIT_2G
11024 - default 0x78000000 if VMSPLIT_2G_OPT
11025 + default 0x70000000 if VMSPLIT_2G_OPT
11026 default 0x40000000 if VMSPLIT_1G
11027 default 0xC0000000
11028 depends on X86_32
11029 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11030
11031 config EFI
11032 bool "EFI runtime service support"
11033 - depends on ACPI
11034 + depends on ACPI && !PAX_KERNEXEC
11035 ---help---
11036 This enables the kernel to use EFI runtime services that are
11037 available (such as the EFI variable services).
11038 @@ -1460,6 +1460,7 @@ config SECCOMP
11039
11040 config CC_STACKPROTECTOR
11041 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11042 + depends on X86_64 || !PAX_MEMORY_UDEREF
11043 ---help---
11044 This option turns on the -fstack-protector GCC feature. This
11045 feature puts, at the beginning of functions, a canary value on
11046 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11047 config PHYSICAL_START
11048 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11049 default "0x1000000"
11050 + range 0x400000 0x40000000
11051 ---help---
11052 This gives the physical address where the kernel is loaded.
11053
11054 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11055 hex
11056 prompt "Alignment value to which kernel should be aligned" if X86_32
11057 default "0x1000000"
11058 + range 0x400000 0x1000000 if PAX_KERNEXEC
11059 range 0x2000 0x1000000
11060 ---help---
11061 This value puts the alignment restrictions on physical address
11062 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11063 Say N if you want to disable CPU hotplug.
11064
11065 config COMPAT_VDSO
11066 - def_bool y
11067 + def_bool n
11068 prompt "Compat VDSO support"
11069 depends on X86_32 || IA32_EMULATION
11070 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11071 ---help---
11072 Map the 32-bit VDSO to the predictable old-style address too.
11073 ---help---
11074 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.cpu linux-2.6.32.41/arch/x86/Kconfig.cpu
11075 --- linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11076 +++ linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11077 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11078
11079 config X86_F00F_BUG
11080 def_bool y
11081 - depends on M586MMX || M586TSC || M586 || M486 || M386
11082 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11083
11084 config X86_WP_WORKS_OK
11085 def_bool y
11086 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11087
11088 config X86_ALIGNMENT_16
11089 def_bool y
11090 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11091 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11092
11093 config X86_INTEL_USERCOPY
11094 def_bool y
11095 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11096 # generates cmov.
11097 config X86_CMOV
11098 def_bool y
11099 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11100 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11101
11102 config X86_MINIMUM_CPU_FAMILY
11103 int
11104 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.debug linux-2.6.32.41/arch/x86/Kconfig.debug
11105 --- linux-2.6.32.41/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11106 +++ linux-2.6.32.41/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11107 @@ -99,7 +99,7 @@ config X86_PTDUMP
11108 config DEBUG_RODATA
11109 bool "Write protect kernel read-only data structures"
11110 default y
11111 - depends on DEBUG_KERNEL
11112 + depends on DEBUG_KERNEL && BROKEN
11113 ---help---
11114 Mark the kernel read-only data as write-protected in the pagetables,
11115 in order to catch accidental (and incorrect) writes to such const
11116 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S
11117 --- linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11118 +++ linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11119 @@ -104,7 +104,7 @@ _start:
11120 movl %eax, %ecx
11121 orl %edx, %ecx
11122 jz 1f
11123 - movl $0xc0000080, %ecx
11124 + mov $MSR_EFER, %ecx
11125 wrmsr
11126 1:
11127
11128 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c
11129 --- linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11130 +++ linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11131 @@ -11,11 +11,12 @@
11132 #include <linux/cpumask.h>
11133 #include <asm/segment.h>
11134 #include <asm/desc.h>
11135 +#include <asm/e820.h>
11136
11137 #include "realmode/wakeup.h"
11138 #include "sleep.h"
11139
11140 -unsigned long acpi_wakeup_address;
11141 +unsigned long acpi_wakeup_address = 0x2000;
11142 unsigned long acpi_realmode_flags;
11143
11144 /* address in low memory of the wakeup routine. */
11145 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11146 header->trampoline_segment = setup_trampoline() >> 4;
11147 #ifdef CONFIG_SMP
11148 stack_start.sp = temp_stack + sizeof(temp_stack);
11149 +
11150 + pax_open_kernel();
11151 early_gdt_descr.address =
11152 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11153 + pax_close_kernel();
11154 +
11155 initial_gs = per_cpu_offset(smp_processor_id());
11156 #endif
11157 initial_code = (unsigned long)wakeup_long64;
11158 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11159 return;
11160 }
11161
11162 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11163 -
11164 - if (!acpi_realmode) {
11165 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11166 - return;
11167 - }
11168 -
11169 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11170 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11171 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11172 }
11173
11174
11175 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S
11176 --- linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11177 +++ linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11178 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11179 # and restore the stack ... but you need gdt for this to work
11180 movl saved_context_esp, %esp
11181
11182 - movl %cs:saved_magic, %eax
11183 - cmpl $0x12345678, %eax
11184 + cmpl $0x12345678, saved_magic
11185 jne bogus_magic
11186
11187 # jump to place where we left off
11188 - movl saved_eip, %eax
11189 - jmp *%eax
11190 + jmp *(saved_eip)
11191
11192 bogus_magic:
11193 jmp bogus_magic
11194 diff -urNp linux-2.6.32.41/arch/x86/kernel/alternative.c linux-2.6.32.41/arch/x86/kernel/alternative.c
11195 --- linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11196 +++ linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11197 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11198
11199 BUG_ON(p->len > MAX_PATCH_LEN);
11200 /* prep the buffer with the original instructions */
11201 - memcpy(insnbuf, p->instr, p->len);
11202 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11203 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11204 (unsigned long)p->instr, p->len);
11205
11206 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11207 if (smp_alt_once)
11208 free_init_pages("SMP alternatives",
11209 (unsigned long)__smp_locks,
11210 - (unsigned long)__smp_locks_end);
11211 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11212
11213 restart_nmi();
11214 }
11215 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11216 * instructions. And on the local CPU you need to be protected again NMI or MCE
11217 * handlers seeing an inconsistent instruction while you patch.
11218 */
11219 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11220 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11221 size_t len)
11222 {
11223 unsigned long flags;
11224 local_irq_save(flags);
11225 - memcpy(addr, opcode, len);
11226 +
11227 + pax_open_kernel();
11228 + memcpy(ktla_ktva(addr), opcode, len);
11229 sync_core();
11230 + pax_close_kernel();
11231 +
11232 local_irq_restore(flags);
11233 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11234 that causes hangs on some VIA CPUs. */
11235 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11236 */
11237 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11238 {
11239 - unsigned long flags;
11240 - char *vaddr;
11241 + unsigned char *vaddr = ktla_ktva(addr);
11242 struct page *pages[2];
11243 - int i;
11244 + size_t i;
11245
11246 if (!core_kernel_text((unsigned long)addr)) {
11247 - pages[0] = vmalloc_to_page(addr);
11248 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11249 + pages[0] = vmalloc_to_page(vaddr);
11250 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11251 } else {
11252 - pages[0] = virt_to_page(addr);
11253 + pages[0] = virt_to_page(vaddr);
11254 WARN_ON(!PageReserved(pages[0]));
11255 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11256 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11257 }
11258 BUG_ON(!pages[0]);
11259 - local_irq_save(flags);
11260 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11261 - if (pages[1])
11262 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11263 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11264 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11265 - clear_fixmap(FIX_TEXT_POKE0);
11266 - if (pages[1])
11267 - clear_fixmap(FIX_TEXT_POKE1);
11268 - local_flush_tlb();
11269 - sync_core();
11270 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11271 - that causes hangs on some VIA CPUs. */
11272 + text_poke_early(addr, opcode, len);
11273 for (i = 0; i < len; i++)
11274 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11275 - local_irq_restore(flags);
11276 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11277 return addr;
11278 }
11279 diff -urNp linux-2.6.32.41/arch/x86/kernel/amd_iommu.c linux-2.6.32.41/arch/x86/kernel/amd_iommu.c
11280 --- linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11281 +++ linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11282 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11283 }
11284 }
11285
11286 -static struct dma_map_ops amd_iommu_dma_ops = {
11287 +static const struct dma_map_ops amd_iommu_dma_ops = {
11288 .alloc_coherent = alloc_coherent,
11289 .free_coherent = free_coherent,
11290 .map_page = map_page,
11291 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/apic.c linux-2.6.32.41/arch/x86/kernel/apic/apic.c
11292 --- linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11293 +++ linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11294 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11295 apic_write(APIC_ESR, 0);
11296 v1 = apic_read(APIC_ESR);
11297 ack_APIC_irq();
11298 - atomic_inc(&irq_err_count);
11299 + atomic_inc_unchecked(&irq_err_count);
11300
11301 /*
11302 * Here is what the APIC error bits mean:
11303 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11304 u16 *bios_cpu_apicid;
11305 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11306
11307 + pax_track_stack();
11308 +
11309 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11310 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11311
11312 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c
11313 --- linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11314 +++ linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11315 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11316 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11317 GFP_ATOMIC);
11318 if (!ioapic_entries)
11319 - return 0;
11320 + return NULL;
11321
11322 for (apic = 0; apic < nr_ioapics; apic++) {
11323 ioapic_entries[apic] =
11324 @@ -733,7 +733,7 @@ nomem:
11325 kfree(ioapic_entries[apic]);
11326 kfree(ioapic_entries);
11327
11328 - return 0;
11329 + return NULL;
11330 }
11331
11332 /*
11333 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11334 }
11335 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11336
11337 -void lock_vector_lock(void)
11338 +void lock_vector_lock(void) __acquires(vector_lock)
11339 {
11340 /* Used to the online set of cpus does not change
11341 * during assign_irq_vector.
11342 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11343 spin_lock(&vector_lock);
11344 }
11345
11346 -void unlock_vector_lock(void)
11347 +void unlock_vector_lock(void) __releases(vector_lock)
11348 {
11349 spin_unlock(&vector_lock);
11350 }
11351 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11352 ack_APIC_irq();
11353 }
11354
11355 -atomic_t irq_mis_count;
11356 +atomic_unchecked_t irq_mis_count;
11357
11358 static void ack_apic_level(unsigned int irq)
11359 {
11360 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11361
11362 /* Tail end of version 0x11 I/O APIC bug workaround */
11363 if (!(v & (1 << (i & 0x1f)))) {
11364 - atomic_inc(&irq_mis_count);
11365 + atomic_inc_unchecked(&irq_mis_count);
11366 spin_lock(&ioapic_lock);
11367 __mask_and_edge_IO_APIC_irq(cfg);
11368 __unmask_and_level_IO_APIC_irq(cfg);
11369 diff -urNp linux-2.6.32.41/arch/x86/kernel/apm_32.c linux-2.6.32.41/arch/x86/kernel/apm_32.c
11370 --- linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11371 +++ linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11372 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11373 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11374 * even though they are called in protected mode.
11375 */
11376 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11377 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11378 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11379
11380 static const char driver_version[] = "1.16ac"; /* no spaces */
11381 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11382 BUG_ON(cpu != 0);
11383 gdt = get_cpu_gdt_table(cpu);
11384 save_desc_40 = gdt[0x40 / 8];
11385 +
11386 + pax_open_kernel();
11387 gdt[0x40 / 8] = bad_bios_desc;
11388 + pax_close_kernel();
11389
11390 apm_irq_save(flags);
11391 APM_DO_SAVE_SEGS;
11392 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11393 &call->esi);
11394 APM_DO_RESTORE_SEGS;
11395 apm_irq_restore(flags);
11396 +
11397 + pax_open_kernel();
11398 gdt[0x40 / 8] = save_desc_40;
11399 + pax_close_kernel();
11400 +
11401 put_cpu();
11402
11403 return call->eax & 0xff;
11404 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11405 BUG_ON(cpu != 0);
11406 gdt = get_cpu_gdt_table(cpu);
11407 save_desc_40 = gdt[0x40 / 8];
11408 +
11409 + pax_open_kernel();
11410 gdt[0x40 / 8] = bad_bios_desc;
11411 + pax_close_kernel();
11412
11413 apm_irq_save(flags);
11414 APM_DO_SAVE_SEGS;
11415 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11416 &call->eax);
11417 APM_DO_RESTORE_SEGS;
11418 apm_irq_restore(flags);
11419 +
11420 + pax_open_kernel();
11421 gdt[0x40 / 8] = save_desc_40;
11422 + pax_close_kernel();
11423 +
11424 put_cpu();
11425 return error;
11426 }
11427 @@ -975,7 +989,7 @@ recalc:
11428
11429 static void apm_power_off(void)
11430 {
11431 - unsigned char po_bios_call[] = {
11432 + const unsigned char po_bios_call[] = {
11433 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11434 0x8e, 0xd0, /* movw ax,ss */
11435 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11436 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11437 * code to that CPU.
11438 */
11439 gdt = get_cpu_gdt_table(0);
11440 +
11441 + pax_open_kernel();
11442 set_desc_base(&gdt[APM_CS >> 3],
11443 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11444 set_desc_base(&gdt[APM_CS_16 >> 3],
11445 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11446 set_desc_base(&gdt[APM_DS >> 3],
11447 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11448 + pax_close_kernel();
11449
11450 proc_create("apm", 0, NULL, &apm_file_ops);
11451
11452 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c
11453 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11454 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11455 @@ -51,7 +51,6 @@ void foo(void)
11456 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11457 BLANK();
11458
11459 - OFFSET(TI_task, thread_info, task);
11460 OFFSET(TI_exec_domain, thread_info, exec_domain);
11461 OFFSET(TI_flags, thread_info, flags);
11462 OFFSET(TI_status, thread_info, status);
11463 @@ -60,6 +59,8 @@ void foo(void)
11464 OFFSET(TI_restart_block, thread_info, restart_block);
11465 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11466 OFFSET(TI_cpu, thread_info, cpu);
11467 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11468 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11469 BLANK();
11470
11471 OFFSET(GDS_size, desc_ptr, size);
11472 @@ -99,6 +100,7 @@ void foo(void)
11473
11474 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11475 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11476 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11477 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11478 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11479 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11480 @@ -115,6 +117,11 @@ void foo(void)
11481 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11482 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11483 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11484 +
11485 +#ifdef CONFIG_PAX_KERNEXEC
11486 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11487 +#endif
11488 +
11489 #endif
11490
11491 #ifdef CONFIG_XEN
11492 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c
11493 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11494 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11495 @@ -44,6 +44,8 @@ int main(void)
11496 ENTRY(addr_limit);
11497 ENTRY(preempt_count);
11498 ENTRY(status);
11499 + ENTRY(lowest_stack);
11500 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11501 #ifdef CONFIG_IA32_EMULATION
11502 ENTRY(sysenter_return);
11503 #endif
11504 @@ -63,6 +65,18 @@ int main(void)
11505 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11506 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11507 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11508 +
11509 +#ifdef CONFIG_PAX_KERNEXEC
11510 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11511 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11512 +#endif
11513 +
11514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11515 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11516 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11517 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11518 +#endif
11519 +
11520 #endif
11521
11522
11523 @@ -115,6 +129,7 @@ int main(void)
11524 ENTRY(cr8);
11525 BLANK();
11526 #undef ENTRY
11527 + DEFINE(TSS_size, sizeof(struct tss_struct));
11528 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11529 BLANK();
11530 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11531 @@ -130,6 +145,7 @@ int main(void)
11532
11533 BLANK();
11534 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11535 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11536 #ifdef CONFIG_XEN
11537 BLANK();
11538 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11539 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/amd.c
11540 --- linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:56:59.000000000 -0400
11541 +++ linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:57:13.000000000 -0400
11542 @@ -596,7 +596,7 @@ static unsigned int __cpuinit amd_size_c
11543 unsigned int size)
11544 {
11545 /* AMD errata T13 (order #21922) */
11546 - if ((c->x86 == 6)) {
11547 + if (c->x86 == 6) {
11548 /* Duron Rev A0 */
11549 if (c->x86_model == 3 && c->x86_mask == 0)
11550 size = 64;
11551 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/common.c linux-2.6.32.41/arch/x86/kernel/cpu/common.c
11552 --- linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11553 +++ linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11554 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11555
11556 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11557
11558 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11559 -#ifdef CONFIG_X86_64
11560 - /*
11561 - * We need valid kernel segments for data and code in long mode too
11562 - * IRET will check the segment types kkeil 2000/10/28
11563 - * Also sysret mandates a special GDT layout
11564 - *
11565 - * TLS descriptors are currently at a different place compared to i386.
11566 - * Hopefully nobody expects them at a fixed place (Wine?)
11567 - */
11568 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11569 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11570 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11571 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11572 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11573 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11574 -#else
11575 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11576 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11577 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11578 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11579 - /*
11580 - * Segments used for calling PnP BIOS have byte granularity.
11581 - * They code segments and data segments have fixed 64k limits,
11582 - * the transfer segment sizes are set at run time.
11583 - */
11584 - /* 32-bit code */
11585 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11586 - /* 16-bit code */
11587 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11588 - /* 16-bit data */
11589 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11590 - /* 16-bit data */
11591 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11592 - /* 16-bit data */
11593 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11594 - /*
11595 - * The APM segments have byte granularity and their bases
11596 - * are set at run time. All have 64k limits.
11597 - */
11598 - /* 32-bit code */
11599 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11600 - /* 16-bit code */
11601 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11602 - /* data */
11603 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11604 -
11605 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11606 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11607 - GDT_STACK_CANARY_INIT
11608 -#endif
11609 -} };
11610 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11611 -
11612 static int __init x86_xsave_setup(char *s)
11613 {
11614 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11615 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11616 {
11617 struct desc_ptr gdt_descr;
11618
11619 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11620 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11621 gdt_descr.size = GDT_SIZE - 1;
11622 load_gdt(&gdt_descr);
11623 /* Reload the per-cpu base */
11624 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11625 /* Filter out anything that depends on CPUID levels we don't have */
11626 filter_cpuid_features(c, true);
11627
11628 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11629 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11630 +#endif
11631 +
11632 /* If the model name is still unset, do table lookup. */
11633 if (!c->x86_model_id[0]) {
11634 const char *p;
11635 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11636 }
11637 __setup("clearcpuid=", setup_disablecpuid);
11638
11639 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11640 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11641 +
11642 #ifdef CONFIG_X86_64
11643 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11644
11645 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11646 EXPORT_PER_CPU_SYMBOL(current_task);
11647
11648 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11649 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11650 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11651 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11652
11653 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11654 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11655 {
11656 memset(regs, 0, sizeof(struct pt_regs));
11657 regs->fs = __KERNEL_PERCPU;
11658 - regs->gs = __KERNEL_STACK_CANARY;
11659 + savesegment(gs, regs->gs);
11660
11661 return regs;
11662 }
11663 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11664 int i;
11665
11666 cpu = stack_smp_processor_id();
11667 - t = &per_cpu(init_tss, cpu);
11668 + t = init_tss + cpu;
11669 orig_ist = &per_cpu(orig_ist, cpu);
11670
11671 #ifdef CONFIG_NUMA
11672 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11673 switch_to_new_gdt(cpu);
11674 loadsegment(fs, 0);
11675
11676 - load_idt((const struct desc_ptr *)&idt_descr);
11677 + load_idt(&idt_descr);
11678
11679 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11680 syscall_init();
11681 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11682 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11683 barrier();
11684
11685 - check_efer();
11686 if (cpu != 0)
11687 enable_x2apic();
11688
11689 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11690 {
11691 int cpu = smp_processor_id();
11692 struct task_struct *curr = current;
11693 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11694 + struct tss_struct *t = init_tss + cpu;
11695 struct thread_struct *thread = &curr->thread;
11696
11697 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11698 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel.c linux-2.6.32.41/arch/x86/kernel/cpu/intel.c
11699 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11700 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11701 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11702 * Update the IDT descriptor and reload the IDT so that
11703 * it uses the read-only mapped virtual address.
11704 */
11705 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11706 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11707 load_idt(&idt_descr);
11708 }
11709 #endif
11710 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c
11711 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11712 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11713 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11714 return ret;
11715 }
11716
11717 -static struct sysfs_ops sysfs_ops = {
11718 +static const struct sysfs_ops sysfs_ops = {
11719 .show = show,
11720 .store = store,
11721 };
11722 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/Makefile linux-2.6.32.41/arch/x86/kernel/cpu/Makefile
11723 --- linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11724 +++ linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11725 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11726 CFLAGS_REMOVE_common.o = -pg
11727 endif
11728
11729 -# Make sure load_percpu_segment has no stackprotector
11730 -nostackp := $(call cc-option, -fno-stack-protector)
11731 -CFLAGS_common.o := $(nostackp)
11732 -
11733 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11734 obj-y += proc.o capflags.o powerflags.o common.o
11735 obj-y += vmware.o hypervisor.o sched.o
11736 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c
11737 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11738 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11739 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11740 return ret;
11741 }
11742
11743 -static struct sysfs_ops threshold_ops = {
11744 +static const struct sysfs_ops threshold_ops = {
11745 .show = show,
11746 .store = store,
11747 };
11748 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c
11749 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11750 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11751 @@ -43,6 +43,7 @@
11752 #include <asm/ipi.h>
11753 #include <asm/mce.h>
11754 #include <asm/msr.h>
11755 +#include <asm/local.h>
11756
11757 #include "mce-internal.h"
11758
11759 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11760 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11761 m->cs, m->ip);
11762
11763 - if (m->cs == __KERNEL_CS)
11764 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11765 print_symbol("{%s}", m->ip);
11766 pr_cont("\n");
11767 }
11768 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
11769
11770 #define PANIC_TIMEOUT 5 /* 5 seconds */
11771
11772 -static atomic_t mce_paniced;
11773 +static atomic_unchecked_t mce_paniced;
11774
11775 static int fake_panic;
11776 -static atomic_t mce_fake_paniced;
11777 +static atomic_unchecked_t mce_fake_paniced;
11778
11779 /* Panic in progress. Enable interrupts and wait for final IPI */
11780 static void wait_for_panic(void)
11781 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11782 /*
11783 * Make sure only one CPU runs in machine check panic
11784 */
11785 - if (atomic_inc_return(&mce_paniced) > 1)
11786 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11787 wait_for_panic();
11788 barrier();
11789
11790 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11791 console_verbose();
11792 } else {
11793 /* Don't log too much for fake panic */
11794 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11795 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11796 return;
11797 }
11798 print_mce_head();
11799 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11800 * might have been modified by someone else.
11801 */
11802 rmb();
11803 - if (atomic_read(&mce_paniced))
11804 + if (atomic_read_unchecked(&mce_paniced))
11805 wait_for_panic();
11806 if (!monarch_timeout)
11807 goto out;
11808 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11809 */
11810
11811 static DEFINE_SPINLOCK(mce_state_lock);
11812 -static int open_count; /* #times opened */
11813 +static local_t open_count; /* #times opened */
11814 static int open_exclu; /* already open exclusive? */
11815
11816 static int mce_open(struct inode *inode, struct file *file)
11817 {
11818 spin_lock(&mce_state_lock);
11819
11820 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11821 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11822 spin_unlock(&mce_state_lock);
11823
11824 return -EBUSY;
11825 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11826
11827 if (file->f_flags & O_EXCL)
11828 open_exclu = 1;
11829 - open_count++;
11830 + local_inc(&open_count);
11831
11832 spin_unlock(&mce_state_lock);
11833
11834 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11835 {
11836 spin_lock(&mce_state_lock);
11837
11838 - open_count--;
11839 + local_dec(&open_count);
11840 open_exclu = 0;
11841
11842 spin_unlock(&mce_state_lock);
11843 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11844 static void mce_reset(void)
11845 {
11846 cpu_missing = 0;
11847 - atomic_set(&mce_fake_paniced, 0);
11848 + atomic_set_unchecked(&mce_fake_paniced, 0);
11849 atomic_set(&mce_executing, 0);
11850 atomic_set(&mce_callin, 0);
11851 atomic_set(&global_nwo, 0);
11852 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c
11853 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11854 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11855 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11856 return 0;
11857 }
11858
11859 -static struct mtrr_ops amd_mtrr_ops = {
11860 +static const struct mtrr_ops amd_mtrr_ops = {
11861 .vendor = X86_VENDOR_AMD,
11862 .set = amd_set_mtrr,
11863 .get = amd_get_mtrr,
11864 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c
11865 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11866 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11867 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11868 return 0;
11869 }
11870
11871 -static struct mtrr_ops centaur_mtrr_ops = {
11872 +static const struct mtrr_ops centaur_mtrr_ops = {
11873 .vendor = X86_VENDOR_CENTAUR,
11874 .set = centaur_set_mcr,
11875 .get = centaur_get_mcr,
11876 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c
11877 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11878 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11879 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11880 post_set();
11881 }
11882
11883 -static struct mtrr_ops cyrix_mtrr_ops = {
11884 +static const struct mtrr_ops cyrix_mtrr_ops = {
11885 .vendor = X86_VENDOR_CYRIX,
11886 .set_all = cyrix_set_all,
11887 .set = cyrix_set_arr,
11888 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c
11889 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11890 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11891 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11892 /*
11893 * Generic structure...
11894 */
11895 -struct mtrr_ops generic_mtrr_ops = {
11896 +const struct mtrr_ops generic_mtrr_ops = {
11897 .use_intel_if = 1,
11898 .set_all = generic_set_all,
11899 .get = generic_get_mtrr,
11900 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c
11901 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11902 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11903 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11904 u64 size_or_mask, size_and_mask;
11905 static bool mtrr_aps_delayed_init;
11906
11907 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11908 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11909
11910 -struct mtrr_ops *mtrr_if;
11911 +const struct mtrr_ops *mtrr_if;
11912
11913 static void set_mtrr(unsigned int reg, unsigned long base,
11914 unsigned long size, mtrr_type type);
11915
11916 -void set_mtrr_ops(struct mtrr_ops *ops)
11917 +void set_mtrr_ops(const struct mtrr_ops *ops)
11918 {
11919 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
11920 mtrr_ops[ops->vendor] = ops;
11921 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h
11922 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
11923 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
11924 @@ -12,19 +12,19 @@
11925 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
11926
11927 struct mtrr_ops {
11928 - u32 vendor;
11929 - u32 use_intel_if;
11930 - void (*set)(unsigned int reg, unsigned long base,
11931 + const u32 vendor;
11932 + const u32 use_intel_if;
11933 + void (* const set)(unsigned int reg, unsigned long base,
11934 unsigned long size, mtrr_type type);
11935 - void (*set_all)(void);
11936 + void (* const set_all)(void);
11937
11938 - void (*get)(unsigned int reg, unsigned long *base,
11939 + void (* const get)(unsigned int reg, unsigned long *base,
11940 unsigned long *size, mtrr_type *type);
11941 - int (*get_free_region)(unsigned long base, unsigned long size,
11942 + int (* const get_free_region)(unsigned long base, unsigned long size,
11943 int replace_reg);
11944 - int (*validate_add_page)(unsigned long base, unsigned long size,
11945 + int (* const validate_add_page)(unsigned long base, unsigned long size,
11946 unsigned int type);
11947 - int (*have_wrcomb)(void);
11948 + int (* const have_wrcomb)(void);
11949 };
11950
11951 extern int generic_get_free_region(unsigned long base, unsigned long size,
11952 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
11953 extern int generic_validate_add_page(unsigned long base, unsigned long size,
11954 unsigned int type);
11955
11956 -extern struct mtrr_ops generic_mtrr_ops;
11957 +extern const struct mtrr_ops generic_mtrr_ops;
11958
11959 extern int positive_have_wrcomb(void);
11960
11961 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
11962 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
11963 void get_mtrr_state(void);
11964
11965 -extern void set_mtrr_ops(struct mtrr_ops *ops);
11966 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
11967
11968 extern u64 size_or_mask, size_and_mask;
11969 -extern struct mtrr_ops *mtrr_if;
11970 +extern const struct mtrr_ops *mtrr_if;
11971
11972 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
11973 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
11974 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c
11975 --- linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
11976 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
11977 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
11978
11979 /* Interface defining a CPU specific perfctr watchdog */
11980 struct wd_ops {
11981 - int (*reserve)(void);
11982 - void (*unreserve)(void);
11983 - int (*setup)(unsigned nmi_hz);
11984 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11985 - void (*stop)(void);
11986 + int (* const reserve)(void);
11987 + void (* const unreserve)(void);
11988 + int (* const setup)(unsigned nmi_hz);
11989 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11990 + void (* const stop)(void);
11991 unsigned perfctr;
11992 unsigned evntsel;
11993 u64 checkbit;
11994 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
11995 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
11996 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
11997
11998 +/* cannot be const */
11999 static struct wd_ops intel_arch_wd_ops;
12000
12001 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12002 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12003 return 1;
12004 }
12005
12006 +/* cannot be const */
12007 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12008 .reserve = single_msr_reserve,
12009 .unreserve = single_msr_unreserve,
12010 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c
12011 --- linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12012 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12013 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12014 * count to the generic event atomically:
12015 */
12016 again:
12017 - prev_raw_count = atomic64_read(&hwc->prev_count);
12018 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12019 rdmsrl(hwc->event_base + idx, new_raw_count);
12020
12021 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12022 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12023 new_raw_count) != prev_raw_count)
12024 goto again;
12025
12026 @@ -741,7 +741,7 @@ again:
12027 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12028 delta >>= shift;
12029
12030 - atomic64_add(delta, &event->count);
12031 + atomic64_add_unchecked(delta, &event->count);
12032 atomic64_sub(delta, &hwc->period_left);
12033
12034 return new_raw_count;
12035 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12036 * The hw event starts counting from this event offset,
12037 * mark it to be able to extra future deltas:
12038 */
12039 - atomic64_set(&hwc->prev_count, (u64)-left);
12040 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12041
12042 err = checking_wrmsrl(hwc->event_base + idx,
12043 (u64)(-left) & x86_pmu.event_mask);
12044 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12045 break;
12046
12047 callchain_store(entry, frame.return_address);
12048 - fp = frame.next_frame;
12049 + fp = (__force const void __user *)frame.next_frame;
12050 }
12051 }
12052
12053 diff -urNp linux-2.6.32.41/arch/x86/kernel/crash.c linux-2.6.32.41/arch/x86/kernel/crash.c
12054 --- linux-2.6.32.41/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12055 +++ linux-2.6.32.41/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12056 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12057 regs = args->regs;
12058
12059 #ifdef CONFIG_X86_32
12060 - if (!user_mode_vm(regs)) {
12061 + if (!user_mode(regs)) {
12062 crash_fixup_ss_esp(&fixed_regs, regs);
12063 regs = &fixed_regs;
12064 }
12065 diff -urNp linux-2.6.32.41/arch/x86/kernel/doublefault_32.c linux-2.6.32.41/arch/x86/kernel/doublefault_32.c
12066 --- linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12067 +++ linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12068 @@ -11,7 +11,7 @@
12069
12070 #define DOUBLEFAULT_STACKSIZE (1024)
12071 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12072 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12073 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12074
12075 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12076
12077 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12078 unsigned long gdt, tss;
12079
12080 store_gdt(&gdt_desc);
12081 - gdt = gdt_desc.address;
12082 + gdt = (unsigned long)gdt_desc.address;
12083
12084 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12085
12086 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12087 /* 0x2 bit is always set */
12088 .flags = X86_EFLAGS_SF | 0x2,
12089 .sp = STACK_START,
12090 - .es = __USER_DS,
12091 + .es = __KERNEL_DS,
12092 .cs = __KERNEL_CS,
12093 .ss = __KERNEL_DS,
12094 - .ds = __USER_DS,
12095 + .ds = __KERNEL_DS,
12096 .fs = __KERNEL_PERCPU,
12097
12098 .__cr3 = __pa_nodebug(swapper_pg_dir),
12099 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c
12100 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12101 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12102 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12103 #endif
12104
12105 for (;;) {
12106 - struct thread_info *context;
12107 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12108 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12109
12110 - context = (struct thread_info *)
12111 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12112 - bp = print_context_stack(context, stack, bp, ops,
12113 - data, NULL, &graph);
12114 -
12115 - stack = (unsigned long *)context->previous_esp;
12116 - if (!stack)
12117 + if (stack_start == task_stack_page(task))
12118 break;
12119 + stack = *(unsigned long **)stack_start;
12120 if (ops->stack(data, "IRQ") < 0)
12121 break;
12122 touch_nmi_watchdog();
12123 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12124 * When in-kernel, we also print out the stack and code at the
12125 * time of the fault..
12126 */
12127 - if (!user_mode_vm(regs)) {
12128 + if (!user_mode(regs)) {
12129 unsigned int code_prologue = code_bytes * 43 / 64;
12130 unsigned int code_len = code_bytes;
12131 unsigned char c;
12132 u8 *ip;
12133 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12134
12135 printk(KERN_EMERG "Stack:\n");
12136 show_stack_log_lvl(NULL, regs, &regs->sp,
12137 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12138
12139 printk(KERN_EMERG "Code: ");
12140
12141 - ip = (u8 *)regs->ip - code_prologue;
12142 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12143 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12144 /* try starting at IP */
12145 - ip = (u8 *)regs->ip;
12146 + ip = (u8 *)regs->ip + cs_base;
12147 code_len = code_len - code_prologue + 1;
12148 }
12149 for (i = 0; i < code_len; i++, ip++) {
12150 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12151 printk(" Bad EIP value.");
12152 break;
12153 }
12154 - if (ip == (u8 *)regs->ip)
12155 + if (ip == (u8 *)regs->ip + cs_base)
12156 printk("<%02x> ", c);
12157 else
12158 printk("%02x ", c);
12159 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12160 {
12161 unsigned short ud2;
12162
12163 + ip = ktla_ktva(ip);
12164 if (ip < PAGE_OFFSET)
12165 return 0;
12166 if (probe_kernel_address((unsigned short *)ip, ud2))
12167 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c
12168 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12169 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12170 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12171 unsigned long *irq_stack_end =
12172 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12173 unsigned used = 0;
12174 - struct thread_info *tinfo;
12175 int graph = 0;
12176 + void *stack_start;
12177
12178 if (!task)
12179 task = current;
12180 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12181 * current stack address. If the stacks consist of nested
12182 * exceptions
12183 */
12184 - tinfo = task_thread_info(task);
12185 for (;;) {
12186 char *id;
12187 unsigned long *estack_end;
12188 +
12189 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12190 &used, &id);
12191
12192 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12193 if (ops->stack(data, id) < 0)
12194 break;
12195
12196 - bp = print_context_stack(tinfo, stack, bp, ops,
12197 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12198 data, estack_end, &graph);
12199 ops->stack(data, "<EOE>");
12200 /*
12201 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12202 if (stack >= irq_stack && stack < irq_stack_end) {
12203 if (ops->stack(data, "IRQ") < 0)
12204 break;
12205 - bp = print_context_stack(tinfo, stack, bp,
12206 + bp = print_context_stack(task, irq_stack, stack, bp,
12207 ops, data, irq_stack_end, &graph);
12208 /*
12209 * We link to the next stack (which would be
12210 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12211 /*
12212 * This handles the process stack:
12213 */
12214 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12215 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12216 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12217 put_cpu();
12218 }
12219 EXPORT_SYMBOL(dump_trace);
12220 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.c linux-2.6.32.41/arch/x86/kernel/dumpstack.c
12221 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12222 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12223 @@ -2,6 +2,9 @@
12224 * Copyright (C) 1991, 1992 Linus Torvalds
12225 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12226 */
12227 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12228 +#define __INCLUDED_BY_HIDESYM 1
12229 +#endif
12230 #include <linux/kallsyms.h>
12231 #include <linux/kprobes.h>
12232 #include <linux/uaccess.h>
12233 @@ -28,7 +31,7 @@ static int die_counter;
12234
12235 void printk_address(unsigned long address, int reliable)
12236 {
12237 - printk(" [<%p>] %s%pS\n", (void *) address,
12238 + printk(" [<%p>] %s%pA\n", (void *) address,
12239 reliable ? "" : "? ", (void *) address);
12240 }
12241
12242 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12243 static void
12244 print_ftrace_graph_addr(unsigned long addr, void *data,
12245 const struct stacktrace_ops *ops,
12246 - struct thread_info *tinfo, int *graph)
12247 + struct task_struct *task, int *graph)
12248 {
12249 - struct task_struct *task = tinfo->task;
12250 unsigned long ret_addr;
12251 int index = task->curr_ret_stack;
12252
12253 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12254 static inline void
12255 print_ftrace_graph_addr(unsigned long addr, void *data,
12256 const struct stacktrace_ops *ops,
12257 - struct thread_info *tinfo, int *graph)
12258 + struct task_struct *task, int *graph)
12259 { }
12260 #endif
12261
12262 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12263 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12264 */
12265
12266 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12267 - void *p, unsigned int size, void *end)
12268 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12269 {
12270 - void *t = tinfo;
12271 if (end) {
12272 if (p < end && p >= (end-THREAD_SIZE))
12273 return 1;
12274 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12275 }
12276
12277 unsigned long
12278 -print_context_stack(struct thread_info *tinfo,
12279 +print_context_stack(struct task_struct *task, void *stack_start,
12280 unsigned long *stack, unsigned long bp,
12281 const struct stacktrace_ops *ops, void *data,
12282 unsigned long *end, int *graph)
12283 {
12284 struct stack_frame *frame = (struct stack_frame *)bp;
12285
12286 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12287 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12288 unsigned long addr;
12289
12290 addr = *stack;
12291 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12292 } else {
12293 ops->address(data, addr, 0);
12294 }
12295 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12296 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12297 }
12298 stack++;
12299 }
12300 @@ -180,7 +180,7 @@ void dump_stack(void)
12301 #endif
12302
12303 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12304 - current->pid, current->comm, print_tainted(),
12305 + task_pid_nr(current), current->comm, print_tainted(),
12306 init_utsname()->release,
12307 (int)strcspn(init_utsname()->version, " "),
12308 init_utsname()->version);
12309 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12310 return flags;
12311 }
12312
12313 +extern void gr_handle_kernel_exploit(void);
12314 +
12315 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12316 {
12317 if (regs && kexec_should_crash(current))
12318 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12319 panic("Fatal exception in interrupt");
12320 if (panic_on_oops)
12321 panic("Fatal exception");
12322 - do_exit(signr);
12323 +
12324 + gr_handle_kernel_exploit();
12325 +
12326 + do_group_exit(signr);
12327 }
12328
12329 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12330 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12331 unsigned long flags = oops_begin();
12332 int sig = SIGSEGV;
12333
12334 - if (!user_mode_vm(regs))
12335 + if (!user_mode(regs))
12336 report_bug(regs->ip, regs);
12337
12338 if (__die(str, regs, err))
12339 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.h linux-2.6.32.41/arch/x86/kernel/dumpstack.h
12340 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12341 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12342 @@ -15,7 +15,7 @@
12343 #endif
12344
12345 extern unsigned long
12346 -print_context_stack(struct thread_info *tinfo,
12347 +print_context_stack(struct task_struct *task, void *stack_start,
12348 unsigned long *stack, unsigned long bp,
12349 const struct stacktrace_ops *ops, void *data,
12350 unsigned long *end, int *graph);
12351 diff -urNp linux-2.6.32.41/arch/x86/kernel/e820.c linux-2.6.32.41/arch/x86/kernel/e820.c
12352 --- linux-2.6.32.41/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12353 +++ linux-2.6.32.41/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12354 @@ -733,7 +733,7 @@ struct early_res {
12355 };
12356 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12357 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12358 - {}
12359 + { 0, 0, {0}, 0 }
12360 };
12361
12362 static int __init find_overlapped_early(u64 start, u64 end)
12363 diff -urNp linux-2.6.32.41/arch/x86/kernel/early_printk.c linux-2.6.32.41/arch/x86/kernel/early_printk.c
12364 --- linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12365 +++ linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12366 @@ -7,6 +7,7 @@
12367 #include <linux/pci_regs.h>
12368 #include <linux/pci_ids.h>
12369 #include <linux/errno.h>
12370 +#include <linux/sched.h>
12371 #include <asm/io.h>
12372 #include <asm/processor.h>
12373 #include <asm/fcntl.h>
12374 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12375 int n;
12376 va_list ap;
12377
12378 + pax_track_stack();
12379 +
12380 va_start(ap, fmt);
12381 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12382 early_console->write(early_console, buf, n);
12383 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_32.c linux-2.6.32.41/arch/x86/kernel/efi_32.c
12384 --- linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12385 +++ linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12386 @@ -38,70 +38,38 @@
12387 */
12388
12389 static unsigned long efi_rt_eflags;
12390 -static pgd_t efi_bak_pg_dir_pointer[2];
12391 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12392
12393 -void efi_call_phys_prelog(void)
12394 +void __init efi_call_phys_prelog(void)
12395 {
12396 - unsigned long cr4;
12397 - unsigned long temp;
12398 struct desc_ptr gdt_descr;
12399
12400 local_irq_save(efi_rt_eflags);
12401
12402 - /*
12403 - * If I don't have PAE, I should just duplicate two entries in page
12404 - * directory. If I have PAE, I just need to duplicate one entry in
12405 - * page directory.
12406 - */
12407 - cr4 = read_cr4_safe();
12408
12409 - if (cr4 & X86_CR4_PAE) {
12410 - efi_bak_pg_dir_pointer[0].pgd =
12411 - swapper_pg_dir[pgd_index(0)].pgd;
12412 - swapper_pg_dir[0].pgd =
12413 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12414 - } else {
12415 - efi_bak_pg_dir_pointer[0].pgd =
12416 - swapper_pg_dir[pgd_index(0)].pgd;
12417 - efi_bak_pg_dir_pointer[1].pgd =
12418 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12419 - swapper_pg_dir[pgd_index(0)].pgd =
12420 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12421 - temp = PAGE_OFFSET + 0x400000;
12422 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12423 - swapper_pg_dir[pgd_index(temp)].pgd;
12424 - }
12425 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12426 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12427 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12428
12429 /*
12430 * After the lock is released, the original page table is restored.
12431 */
12432 __flush_tlb_all();
12433
12434 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12435 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12436 gdt_descr.size = GDT_SIZE - 1;
12437 load_gdt(&gdt_descr);
12438 }
12439
12440 -void efi_call_phys_epilog(void)
12441 +void __init efi_call_phys_epilog(void)
12442 {
12443 - unsigned long cr4;
12444 struct desc_ptr gdt_descr;
12445
12446 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12447 + gdt_descr.address = get_cpu_gdt_table(0);
12448 gdt_descr.size = GDT_SIZE - 1;
12449 load_gdt(&gdt_descr);
12450
12451 - cr4 = read_cr4_safe();
12452 -
12453 - if (cr4 & X86_CR4_PAE) {
12454 - swapper_pg_dir[pgd_index(0)].pgd =
12455 - efi_bak_pg_dir_pointer[0].pgd;
12456 - } else {
12457 - swapper_pg_dir[pgd_index(0)].pgd =
12458 - efi_bak_pg_dir_pointer[0].pgd;
12459 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12460 - efi_bak_pg_dir_pointer[1].pgd;
12461 - }
12462 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12463
12464 /*
12465 * After the lock is released, the original page table is restored.
12466 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S
12467 --- linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12468 +++ linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12469 @@ -6,6 +6,7 @@
12470 */
12471
12472 #include <linux/linkage.h>
12473 +#include <linux/init.h>
12474 #include <asm/page_types.h>
12475
12476 /*
12477 @@ -20,7 +21,7 @@
12478 * service functions will comply with gcc calling convention, too.
12479 */
12480
12481 -.text
12482 +__INIT
12483 ENTRY(efi_call_phys)
12484 /*
12485 * 0. The function can only be called in Linux kernel. So CS has been
12486 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12487 * The mapping of lower virtual memory has been created in prelog and
12488 * epilog.
12489 */
12490 - movl $1f, %edx
12491 - subl $__PAGE_OFFSET, %edx
12492 - jmp *%edx
12493 + jmp 1f-__PAGE_OFFSET
12494 1:
12495
12496 /*
12497 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12498 * parameter 2, ..., param n. To make things easy, we save the return
12499 * address of efi_call_phys in a global variable.
12500 */
12501 - popl %edx
12502 - movl %edx, saved_return_addr
12503 - /* get the function pointer into ECX*/
12504 - popl %ecx
12505 - movl %ecx, efi_rt_function_ptr
12506 - movl $2f, %edx
12507 - subl $__PAGE_OFFSET, %edx
12508 - pushl %edx
12509 + popl (saved_return_addr)
12510 + popl (efi_rt_function_ptr)
12511
12512 /*
12513 * 3. Clear PG bit in %CR0.
12514 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12515 /*
12516 * 5. Call the physical function.
12517 */
12518 - jmp *%ecx
12519 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12520
12521 -2:
12522 /*
12523 * 6. After EFI runtime service returns, control will return to
12524 * following instruction. We'd better readjust stack pointer first.
12525 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12526 movl %cr0, %edx
12527 orl $0x80000000, %edx
12528 movl %edx, %cr0
12529 - jmp 1f
12530 -1:
12531 +
12532 /*
12533 * 8. Now restore the virtual mode from flat mode by
12534 * adding EIP with PAGE_OFFSET.
12535 */
12536 - movl $1f, %edx
12537 - jmp *%edx
12538 + jmp 1f+__PAGE_OFFSET
12539 1:
12540
12541 /*
12542 * 9. Balance the stack. And because EAX contain the return value,
12543 * we'd better not clobber it.
12544 */
12545 - leal efi_rt_function_ptr, %edx
12546 - movl (%edx), %ecx
12547 - pushl %ecx
12548 + pushl (efi_rt_function_ptr)
12549
12550 /*
12551 - * 10. Push the saved return address onto the stack and return.
12552 + * 10. Return to the saved return address.
12553 */
12554 - leal saved_return_addr, %edx
12555 - movl (%edx), %ecx
12556 - pushl %ecx
12557 - ret
12558 + jmpl *(saved_return_addr)
12559 ENDPROC(efi_call_phys)
12560 .previous
12561
12562 -.data
12563 +__INITDATA
12564 saved_return_addr:
12565 .long 0
12566 efi_rt_function_ptr:
12567 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_32.S linux-2.6.32.41/arch/x86/kernel/entry_32.S
12568 --- linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12569 +++ linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12570 @@ -185,13 +185,146 @@
12571 /*CFI_REL_OFFSET gs, PT_GS*/
12572 .endm
12573 .macro SET_KERNEL_GS reg
12574 +
12575 +#ifdef CONFIG_CC_STACKPROTECTOR
12576 movl $(__KERNEL_STACK_CANARY), \reg
12577 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12578 + movl $(__USER_DS), \reg
12579 +#else
12580 + xorl \reg, \reg
12581 +#endif
12582 +
12583 movl \reg, %gs
12584 .endm
12585
12586 #endif /* CONFIG_X86_32_LAZY_GS */
12587
12588 -.macro SAVE_ALL
12589 +.macro pax_enter_kernel
12590 +#ifdef CONFIG_PAX_KERNEXEC
12591 + call pax_enter_kernel
12592 +#endif
12593 +.endm
12594 +
12595 +.macro pax_exit_kernel
12596 +#ifdef CONFIG_PAX_KERNEXEC
12597 + call pax_exit_kernel
12598 +#endif
12599 +.endm
12600 +
12601 +#ifdef CONFIG_PAX_KERNEXEC
12602 +ENTRY(pax_enter_kernel)
12603 +#ifdef CONFIG_PARAVIRT
12604 + pushl %eax
12605 + pushl %ecx
12606 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12607 + mov %eax, %esi
12608 +#else
12609 + mov %cr0, %esi
12610 +#endif
12611 + bts $16, %esi
12612 + jnc 1f
12613 + mov %cs, %esi
12614 + cmp $__KERNEL_CS, %esi
12615 + jz 3f
12616 + ljmp $__KERNEL_CS, $3f
12617 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12618 +2:
12619 +#ifdef CONFIG_PARAVIRT
12620 + mov %esi, %eax
12621 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12622 +#else
12623 + mov %esi, %cr0
12624 +#endif
12625 +3:
12626 +#ifdef CONFIG_PARAVIRT
12627 + popl %ecx
12628 + popl %eax
12629 +#endif
12630 + ret
12631 +ENDPROC(pax_enter_kernel)
12632 +
12633 +ENTRY(pax_exit_kernel)
12634 +#ifdef CONFIG_PARAVIRT
12635 + pushl %eax
12636 + pushl %ecx
12637 +#endif
12638 + mov %cs, %esi
12639 + cmp $__KERNEXEC_KERNEL_CS, %esi
12640 + jnz 2f
12641 +#ifdef CONFIG_PARAVIRT
12642 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12643 + mov %eax, %esi
12644 +#else
12645 + mov %cr0, %esi
12646 +#endif
12647 + btr $16, %esi
12648 + ljmp $__KERNEL_CS, $1f
12649 +1:
12650 +#ifdef CONFIG_PARAVIRT
12651 + mov %esi, %eax
12652 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12653 +#else
12654 + mov %esi, %cr0
12655 +#endif
12656 +2:
12657 +#ifdef CONFIG_PARAVIRT
12658 + popl %ecx
12659 + popl %eax
12660 +#endif
12661 + ret
12662 +ENDPROC(pax_exit_kernel)
12663 +#endif
12664 +
12665 +.macro pax_erase_kstack
12666 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12667 + call pax_erase_kstack
12668 +#endif
12669 +.endm
12670 +
12671 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12672 +/*
12673 + * ebp: thread_info
12674 + * ecx, edx: can be clobbered
12675 + */
12676 +ENTRY(pax_erase_kstack)
12677 + pushl %edi
12678 + pushl %eax
12679 +
12680 + mov TI_lowest_stack(%ebp), %edi
12681 + mov $-0xBEEF, %eax
12682 + std
12683 +
12684 +1: mov %edi, %ecx
12685 + and $THREAD_SIZE_asm - 1, %ecx
12686 + shr $2, %ecx
12687 + repne scasl
12688 + jecxz 2f
12689 +
12690 + cmp $2*16, %ecx
12691 + jc 2f
12692 +
12693 + mov $2*16, %ecx
12694 + repe scasl
12695 + jecxz 2f
12696 + jne 1b
12697 +
12698 +2: cld
12699 + mov %esp, %ecx
12700 + sub %edi, %ecx
12701 + shr $2, %ecx
12702 + rep stosl
12703 +
12704 + mov TI_task_thread_sp0(%ebp), %edi
12705 + sub $128, %edi
12706 + mov %edi, TI_lowest_stack(%ebp)
12707 +
12708 + popl %eax
12709 + popl %edi
12710 + ret
12711 +ENDPROC(pax_erase_kstack)
12712 +#endif
12713 +
12714 +.macro __SAVE_ALL _DS
12715 cld
12716 PUSH_GS
12717 pushl %fs
12718 @@ -224,7 +357,7 @@
12719 pushl %ebx
12720 CFI_ADJUST_CFA_OFFSET 4
12721 CFI_REL_OFFSET ebx, 0
12722 - movl $(__USER_DS), %edx
12723 + movl $\_DS, %edx
12724 movl %edx, %ds
12725 movl %edx, %es
12726 movl $(__KERNEL_PERCPU), %edx
12727 @@ -232,6 +365,15 @@
12728 SET_KERNEL_GS %edx
12729 .endm
12730
12731 +.macro SAVE_ALL
12732 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12733 + __SAVE_ALL __KERNEL_DS
12734 + pax_enter_kernel
12735 +#else
12736 + __SAVE_ALL __USER_DS
12737 +#endif
12738 +.endm
12739 +
12740 .macro RESTORE_INT_REGS
12741 popl %ebx
12742 CFI_ADJUST_CFA_OFFSET -4
12743 @@ -352,7 +494,15 @@ check_userspace:
12744 movb PT_CS(%esp), %al
12745 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12746 cmpl $USER_RPL, %eax
12747 +
12748 +#ifdef CONFIG_PAX_KERNEXEC
12749 + jae resume_userspace
12750 +
12751 + PAX_EXIT_KERNEL
12752 + jmp resume_kernel
12753 +#else
12754 jb resume_kernel # not returning to v8086 or userspace
12755 +#endif
12756
12757 ENTRY(resume_userspace)
12758 LOCKDEP_SYS_EXIT
12759 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12760 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12761 # int/exception return?
12762 jne work_pending
12763 - jmp restore_all
12764 + jmp restore_all_pax
12765 END(ret_from_exception)
12766
12767 #ifdef CONFIG_PREEMPT
12768 @@ -414,25 +564,36 @@ sysenter_past_esp:
12769 /*CFI_REL_OFFSET cs, 0*/
12770 /*
12771 * Push current_thread_info()->sysenter_return to the stack.
12772 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12773 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12774 */
12775 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12776 + pushl $0
12777 CFI_ADJUST_CFA_OFFSET 4
12778 CFI_REL_OFFSET eip, 0
12779
12780 pushl %eax
12781 CFI_ADJUST_CFA_OFFSET 4
12782 SAVE_ALL
12783 + GET_THREAD_INFO(%ebp)
12784 + movl TI_sysenter_return(%ebp),%ebp
12785 + movl %ebp,PT_EIP(%esp)
12786 ENABLE_INTERRUPTS(CLBR_NONE)
12787
12788 /*
12789 * Load the potential sixth argument from user stack.
12790 * Careful about security.
12791 */
12792 + movl PT_OLDESP(%esp),%ebp
12793 +
12794 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12795 + mov PT_OLDSS(%esp),%ds
12796 +1: movl %ds:(%ebp),%ebp
12797 + push %ss
12798 + pop %ds
12799 +#else
12800 cmpl $__PAGE_OFFSET-3,%ebp
12801 jae syscall_fault
12802 1: movl (%ebp),%ebp
12803 +#endif
12804 +
12805 movl %ebp,PT_EBP(%esp)
12806 .section __ex_table,"a"
12807 .align 4
12808 @@ -455,12 +616,23 @@ sysenter_do_call:
12809 testl $_TIF_ALLWORK_MASK, %ecx
12810 jne sysexit_audit
12811 sysenter_exit:
12812 +
12813 +#ifdef CONFIG_PAX_RANDKSTACK
12814 + pushl_cfi %eax
12815 + call pax_randomize_kstack
12816 + popl_cfi %eax
12817 +#endif
12818 +
12819 + pax_erase_kstack
12820 +
12821 /* if something modifies registers it must also disable sysexit */
12822 movl PT_EIP(%esp), %edx
12823 movl PT_OLDESP(%esp), %ecx
12824 xorl %ebp,%ebp
12825 TRACE_IRQS_ON
12826 1: mov PT_FS(%esp), %fs
12827 +2: mov PT_DS(%esp), %ds
12828 +3: mov PT_ES(%esp), %es
12829 PTGS_TO_GS
12830 ENABLE_INTERRUPTS_SYSEXIT
12831
12832 @@ -477,6 +649,9 @@ sysenter_audit:
12833 movl %eax,%edx /* 2nd arg: syscall number */
12834 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12835 call audit_syscall_entry
12836 +
12837 + pax_erase_kstack
12838 +
12839 pushl %ebx
12840 CFI_ADJUST_CFA_OFFSET 4
12841 movl PT_EAX(%esp),%eax /* reload syscall number */
12842 @@ -504,11 +679,17 @@ sysexit_audit:
12843
12844 CFI_ENDPROC
12845 .pushsection .fixup,"ax"
12846 -2: movl $0,PT_FS(%esp)
12847 +4: movl $0,PT_FS(%esp)
12848 + jmp 1b
12849 +5: movl $0,PT_DS(%esp)
12850 + jmp 1b
12851 +6: movl $0,PT_ES(%esp)
12852 jmp 1b
12853 .section __ex_table,"a"
12854 .align 4
12855 - .long 1b,2b
12856 + .long 1b,4b
12857 + .long 2b,5b
12858 + .long 3b,6b
12859 .popsection
12860 PTGS_TO_GS_EX
12861 ENDPROC(ia32_sysenter_target)
12862 @@ -538,6 +719,14 @@ syscall_exit:
12863 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12864 jne syscall_exit_work
12865
12866 +restore_all_pax:
12867 +
12868 +#ifdef CONFIG_PAX_RANDKSTACK
12869 + call pax_randomize_kstack
12870 +#endif
12871 +
12872 + pax_erase_kstack
12873 +
12874 restore_all:
12875 TRACE_IRQS_IRET
12876 restore_all_notrace:
12877 @@ -602,7 +791,13 @@ ldt_ss:
12878 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12879 mov %dx, %ax /* eax: new kernel esp */
12880 sub %eax, %edx /* offset (low word is 0) */
12881 - PER_CPU(gdt_page, %ebx)
12882 +#ifdef CONFIG_SMP
12883 + movl PER_CPU_VAR(cpu_number), %ebx
12884 + shll $PAGE_SHIFT_asm, %ebx
12885 + addl $cpu_gdt_table, %ebx
12886 +#else
12887 + movl $cpu_gdt_table, %ebx
12888 +#endif
12889 shr $16, %edx
12890 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12891 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12892 @@ -636,31 +831,25 @@ work_resched:
12893 movl TI_flags(%ebp), %ecx
12894 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12895 # than syscall tracing?
12896 - jz restore_all
12897 + jz restore_all_pax
12898 testb $_TIF_NEED_RESCHED, %cl
12899 jnz work_resched
12900
12901 work_notifysig: # deal with pending signals and
12902 # notify-resume requests
12903 + movl %esp, %eax
12904 #ifdef CONFIG_VM86
12905 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12906 - movl %esp, %eax
12907 - jne work_notifysig_v86 # returning to kernel-space or
12908 + jz 1f # returning to kernel-space or
12909 # vm86-space
12910 - xorl %edx, %edx
12911 - call do_notify_resume
12912 - jmp resume_userspace_sig
12913
12914 - ALIGN
12915 -work_notifysig_v86:
12916 pushl %ecx # save ti_flags for do_notify_resume
12917 CFI_ADJUST_CFA_OFFSET 4
12918 call save_v86_state # %eax contains pt_regs pointer
12919 popl %ecx
12920 CFI_ADJUST_CFA_OFFSET -4
12921 movl %eax, %esp
12922 -#else
12923 - movl %esp, %eax
12924 +1:
12925 #endif
12926 xorl %edx, %edx
12927 call do_notify_resume
12928 @@ -673,6 +862,9 @@ syscall_trace_entry:
12929 movl $-ENOSYS,PT_EAX(%esp)
12930 movl %esp, %eax
12931 call syscall_trace_enter
12932 +
12933 + pax_erase_kstack
12934 +
12935 /* What it returned is what we'll actually use. */
12936 cmpl $(nr_syscalls), %eax
12937 jnae syscall_call
12938 @@ -695,6 +887,10 @@ END(syscall_exit_work)
12939
12940 RING0_INT_FRAME # can't unwind into user space anyway
12941 syscall_fault:
12942 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12943 + push %ss
12944 + pop %ds
12945 +#endif
12946 GET_THREAD_INFO(%ebp)
12947 movl $-EFAULT,PT_EAX(%esp)
12948 jmp resume_userspace
12949 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
12950 PTREGSCALL(vm86)
12951 PTREGSCALL(vm86old)
12952
12953 + ALIGN;
12954 +ENTRY(kernel_execve)
12955 + push %ebp
12956 + sub $PT_OLDSS+4,%esp
12957 + push %edi
12958 + push %ecx
12959 + push %eax
12960 + lea 3*4(%esp),%edi
12961 + mov $PT_OLDSS/4+1,%ecx
12962 + xorl %eax,%eax
12963 + rep stosl
12964 + pop %eax
12965 + pop %ecx
12966 + pop %edi
12967 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12968 + mov %eax,PT_EBX(%esp)
12969 + mov %edx,PT_ECX(%esp)
12970 + mov %ecx,PT_EDX(%esp)
12971 + mov %esp,%eax
12972 + call sys_execve
12973 + GET_THREAD_INFO(%ebp)
12974 + test %eax,%eax
12975 + jz syscall_exit
12976 + add $PT_OLDSS+4,%esp
12977 + pop %ebp
12978 + ret
12979 +
12980 .macro FIXUP_ESPFIX_STACK
12981 /*
12982 * Switch back for ESPFIX stack to the normal zerobased stack
12983 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
12984 * normal stack and adjusts ESP with the matching offset.
12985 */
12986 /* fixup the stack */
12987 - PER_CPU(gdt_page, %ebx)
12988 +#ifdef CONFIG_SMP
12989 + movl PER_CPU_VAR(cpu_number), %ebx
12990 + shll $PAGE_SHIFT_asm, %ebx
12991 + addl $cpu_gdt_table, %ebx
12992 +#else
12993 + movl $cpu_gdt_table, %ebx
12994 +#endif
12995 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
12996 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
12997 shl $16, %eax
12998 @@ -1198,7 +1427,6 @@ return_to_handler:
12999 ret
13000 #endif
13001
13002 -.section .rodata,"a"
13003 #include "syscall_table_32.S"
13004
13005 syscall_table_size=(.-sys_call_table)
13006 @@ -1255,9 +1483,12 @@ error_code:
13007 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13008 REG_TO_PTGS %ecx
13009 SET_KERNEL_GS %ecx
13010 - movl $(__USER_DS), %ecx
13011 + movl $(__KERNEL_DS), %ecx
13012 movl %ecx, %ds
13013 movl %ecx, %es
13014 +
13015 + pax_enter_kernel
13016 +
13017 TRACE_IRQS_OFF
13018 movl %esp,%eax # pt_regs pointer
13019 call *%edi
13020 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13021 xorl %edx,%edx # zero error code
13022 movl %esp,%eax # pt_regs pointer
13023 call do_nmi
13024 +
13025 + pax_exit_kernel
13026 +
13027 jmp restore_all_notrace
13028 CFI_ENDPROC
13029
13030 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13031 FIXUP_ESPFIX_STACK # %eax == %esp
13032 xorl %edx,%edx # zero error code
13033 call do_nmi
13034 +
13035 + pax_exit_kernel
13036 +
13037 RESTORE_REGS
13038 lss 12+4(%esp), %esp # back to espfix stack
13039 CFI_ADJUST_CFA_OFFSET -24
13040 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_64.S linux-2.6.32.41/arch/x86/kernel/entry_64.S
13041 --- linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13042 +++ linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13043 @@ -53,6 +53,7 @@
13044 #include <asm/paravirt.h>
13045 #include <asm/ftrace.h>
13046 #include <asm/percpu.h>
13047 +#include <asm/pgtable.h>
13048
13049 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13050 #include <linux/elf-em.h>
13051 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13052 ENDPROC(native_usergs_sysret64)
13053 #endif /* CONFIG_PARAVIRT */
13054
13055 + .macro ljmpq sel, off
13056 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13057 + .byte 0x48; ljmp *1234f(%rip)
13058 + .pushsection .rodata
13059 + .align 16
13060 + 1234: .quad \off; .word \sel
13061 + .popsection
13062 +#else
13063 + pushq $\sel
13064 + pushq $\off
13065 + lretq
13066 +#endif
13067 + .endm
13068 +
13069 + .macro pax_enter_kernel
13070 +#ifdef CONFIG_PAX_KERNEXEC
13071 + call pax_enter_kernel
13072 +#endif
13073 + .endm
13074 +
13075 + .macro pax_exit_kernel
13076 +#ifdef CONFIG_PAX_KERNEXEC
13077 + call pax_exit_kernel
13078 +#endif
13079 + .endm
13080 +
13081 +#ifdef CONFIG_PAX_KERNEXEC
13082 +ENTRY(pax_enter_kernel)
13083 + pushq %rdi
13084 +
13085 +#ifdef CONFIG_PARAVIRT
13086 + PV_SAVE_REGS(CLBR_RDI)
13087 +#endif
13088 +
13089 + GET_CR0_INTO_RDI
13090 + bts $16,%rdi
13091 + jnc 1f
13092 + mov %cs,%edi
13093 + cmp $__KERNEL_CS,%edi
13094 + jz 3f
13095 + ljmpq __KERNEL_CS,3f
13096 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13097 +2: SET_RDI_INTO_CR0
13098 +3:
13099 +
13100 +#ifdef CONFIG_PARAVIRT
13101 + PV_RESTORE_REGS(CLBR_RDI)
13102 +#endif
13103 +
13104 + popq %rdi
13105 + retq
13106 +ENDPROC(pax_enter_kernel)
13107 +
13108 +ENTRY(pax_exit_kernel)
13109 + pushq %rdi
13110 +
13111 +#ifdef CONFIG_PARAVIRT
13112 + PV_SAVE_REGS(CLBR_RDI)
13113 +#endif
13114 +
13115 + mov %cs,%rdi
13116 + cmp $__KERNEXEC_KERNEL_CS,%edi
13117 + jnz 2f
13118 + GET_CR0_INTO_RDI
13119 + btr $16,%rdi
13120 + ljmpq __KERNEL_CS,1f
13121 +1: SET_RDI_INTO_CR0
13122 +2:
13123 +
13124 +#ifdef CONFIG_PARAVIRT
13125 + PV_RESTORE_REGS(CLBR_RDI);
13126 +#endif
13127 +
13128 + popq %rdi
13129 + retq
13130 +ENDPROC(pax_exit_kernel)
13131 +#endif
13132 +
13133 + .macro pax_enter_kernel_user
13134 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13135 + call pax_enter_kernel_user
13136 +#endif
13137 + .endm
13138 +
13139 + .macro pax_exit_kernel_user
13140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13141 + call pax_exit_kernel_user
13142 +#endif
13143 +#ifdef CONFIG_PAX_RANDKSTACK
13144 + push %rax
13145 + call pax_randomize_kstack
13146 + pop %rax
13147 +#endif
13148 + pax_erase_kstack
13149 + .endm
13150 +
13151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13152 +ENTRY(pax_enter_kernel_user)
13153 + pushq %rdi
13154 + pushq %rbx
13155 +
13156 +#ifdef CONFIG_PARAVIRT
13157 + PV_SAVE_REGS(CLBR_RDI)
13158 +#endif
13159 +
13160 + GET_CR3_INTO_RDI
13161 + mov %rdi,%rbx
13162 + add $__START_KERNEL_map,%rbx
13163 + sub phys_base(%rip),%rbx
13164 +
13165 +#ifdef CONFIG_PARAVIRT
13166 + pushq %rdi
13167 + cmpl $0, pv_info+PARAVIRT_enabled
13168 + jz 1f
13169 + i = 0
13170 + .rept USER_PGD_PTRS
13171 + mov i*8(%rbx),%rsi
13172 + mov $0,%sil
13173 + lea i*8(%rbx),%rdi
13174 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13175 + i = i + 1
13176 + .endr
13177 + jmp 2f
13178 +1:
13179 +#endif
13180 +
13181 + i = 0
13182 + .rept USER_PGD_PTRS
13183 + movb $0,i*8(%rbx)
13184 + i = i + 1
13185 + .endr
13186 +
13187 +#ifdef CONFIG_PARAVIRT
13188 +2: popq %rdi
13189 +#endif
13190 + SET_RDI_INTO_CR3
13191 +
13192 +#ifdef CONFIG_PAX_KERNEXEC
13193 + GET_CR0_INTO_RDI
13194 + bts $16,%rdi
13195 + SET_RDI_INTO_CR0
13196 +#endif
13197 +
13198 +#ifdef CONFIG_PARAVIRT
13199 + PV_RESTORE_REGS(CLBR_RDI)
13200 +#endif
13201 +
13202 + popq %rbx
13203 + popq %rdi
13204 + retq
13205 +ENDPROC(pax_enter_kernel_user)
13206 +
13207 +ENTRY(pax_exit_kernel_user)
13208 + push %rdi
13209 +
13210 +#ifdef CONFIG_PARAVIRT
13211 + pushq %rbx
13212 + PV_SAVE_REGS(CLBR_RDI)
13213 +#endif
13214 +
13215 +#ifdef CONFIG_PAX_KERNEXEC
13216 + GET_CR0_INTO_RDI
13217 + btr $16,%rdi
13218 + SET_RDI_INTO_CR0
13219 +#endif
13220 +
13221 + GET_CR3_INTO_RDI
13222 + add $__START_KERNEL_map,%rdi
13223 + sub phys_base(%rip),%rdi
13224 +
13225 +#ifdef CONFIG_PARAVIRT
13226 + cmpl $0, pv_info+PARAVIRT_enabled
13227 + jz 1f
13228 + mov %rdi,%rbx
13229 + i = 0
13230 + .rept USER_PGD_PTRS
13231 + mov i*8(%rbx),%rsi
13232 + mov $0x67,%sil
13233 + lea i*8(%rbx),%rdi
13234 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13235 + i = i + 1
13236 + .endr
13237 + jmp 2f
13238 +1:
13239 +#endif
13240 +
13241 + i = 0
13242 + .rept USER_PGD_PTRS
13243 + movb $0x67,i*8(%rdi)
13244 + i = i + 1
13245 + .endr
13246 +
13247 +#ifdef CONFIG_PARAVIRT
13248 +2: PV_RESTORE_REGS(CLBR_RDI)
13249 + popq %rbx
13250 +#endif
13251 +
13252 + popq %rdi
13253 + retq
13254 +ENDPROC(pax_exit_kernel_user)
13255 +#endif
13256 +
13257 +.macro pax_erase_kstack
13258 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13259 + call pax_erase_kstack
13260 +#endif
13261 +.endm
13262 +
13263 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13264 +/*
13265 + * r10: thread_info
13266 + * rcx, rdx: can be clobbered
13267 + */
13268 +ENTRY(pax_erase_kstack)
13269 + pushq %rdi
13270 + pushq %rax
13271 +
13272 + GET_THREAD_INFO(%r10)
13273 + mov TI_lowest_stack(%r10), %rdi
13274 + mov $-0xBEEF, %rax
13275 + std
13276 +
13277 +1: mov %edi, %ecx
13278 + and $THREAD_SIZE_asm - 1, %ecx
13279 + shr $3, %ecx
13280 + repne scasq
13281 + jecxz 2f
13282 +
13283 + cmp $2*8, %ecx
13284 + jc 2f
13285 +
13286 + mov $2*8, %ecx
13287 + repe scasq
13288 + jecxz 2f
13289 + jne 1b
13290 +
13291 +2: cld
13292 + mov %esp, %ecx
13293 + sub %edi, %ecx
13294 + shr $3, %ecx
13295 + rep stosq
13296 +
13297 + mov TI_task_thread_sp0(%r10), %rdi
13298 + sub $256, %rdi
13299 + mov %rdi, TI_lowest_stack(%r10)
13300 +
13301 + popq %rax
13302 + popq %rdi
13303 + ret
13304 +ENDPROC(pax_erase_kstack)
13305 +#endif
13306
13307 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13308 #ifdef CONFIG_TRACE_IRQFLAGS
13309 @@ -317,7 +569,7 @@ ENTRY(save_args)
13310 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13311 movq_cfi rbp, 8 /* push %rbp */
13312 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13313 - testl $3, CS(%rdi)
13314 + testb $3, CS(%rdi)
13315 je 1f
13316 SWAPGS
13317 /*
13318 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13319
13320 RESTORE_REST
13321
13322 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13323 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13324 je int_ret_from_sys_call
13325
13326 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13327 @@ -455,7 +707,7 @@ END(ret_from_fork)
13328 ENTRY(system_call)
13329 CFI_STARTPROC simple
13330 CFI_SIGNAL_FRAME
13331 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13332 + CFI_DEF_CFA rsp,0
13333 CFI_REGISTER rip,rcx
13334 /*CFI_REGISTER rflags,r11*/
13335 SWAPGS_UNSAFE_STACK
13336 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13337
13338 movq %rsp,PER_CPU_VAR(old_rsp)
13339 movq PER_CPU_VAR(kernel_stack),%rsp
13340 + pax_enter_kernel_user
13341 /*
13342 * No need to follow this irqs off/on section - it's straight
13343 * and short:
13344 */
13345 ENABLE_INTERRUPTS(CLBR_NONE)
13346 - SAVE_ARGS 8,1
13347 + SAVE_ARGS 8*6,1
13348 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13349 movq %rcx,RIP-ARGOFFSET(%rsp)
13350 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13351 @@ -502,6 +755,7 @@ sysret_check:
13352 andl %edi,%edx
13353 jnz sysret_careful
13354 CFI_REMEMBER_STATE
13355 + pax_exit_kernel_user
13356 /*
13357 * sysretq will re-enable interrupts:
13358 */
13359 @@ -562,6 +816,9 @@ auditsys:
13360 movq %rax,%rsi /* 2nd arg: syscall number */
13361 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13362 call audit_syscall_entry
13363 +
13364 + pax_erase_kstack
13365 +
13366 LOAD_ARGS 0 /* reload call-clobbered registers */
13367 jmp system_call_fastpath
13368
13369 @@ -592,6 +849,9 @@ tracesys:
13370 FIXUP_TOP_OF_STACK %rdi
13371 movq %rsp,%rdi
13372 call syscall_trace_enter
13373 +
13374 + pax_erase_kstack
13375 +
13376 /*
13377 * Reload arg registers from stack in case ptrace changed them.
13378 * We don't reload %rax because syscall_trace_enter() returned
13379 @@ -613,7 +873,7 @@ tracesys:
13380 GLOBAL(int_ret_from_sys_call)
13381 DISABLE_INTERRUPTS(CLBR_NONE)
13382 TRACE_IRQS_OFF
13383 - testl $3,CS-ARGOFFSET(%rsp)
13384 + testb $3,CS-ARGOFFSET(%rsp)
13385 je retint_restore_args
13386 movl $_TIF_ALLWORK_MASK,%edi
13387 /* edi: mask to check */
13388 @@ -800,6 +1060,16 @@ END(interrupt)
13389 CFI_ADJUST_CFA_OFFSET 10*8
13390 call save_args
13391 PARTIAL_FRAME 0
13392 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13393 + testb $3, CS(%rdi)
13394 + jnz 1f
13395 + pax_enter_kernel
13396 + jmp 2f
13397 +1: pax_enter_kernel_user
13398 +2:
13399 +#else
13400 + pax_enter_kernel
13401 +#endif
13402 call \func
13403 .endm
13404
13405 @@ -822,7 +1092,7 @@ ret_from_intr:
13406 CFI_ADJUST_CFA_OFFSET -8
13407 exit_intr:
13408 GET_THREAD_INFO(%rcx)
13409 - testl $3,CS-ARGOFFSET(%rsp)
13410 + testb $3,CS-ARGOFFSET(%rsp)
13411 je retint_kernel
13412
13413 /* Interrupt came from user space */
13414 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13415 * The iretq could re-enable interrupts:
13416 */
13417 DISABLE_INTERRUPTS(CLBR_ANY)
13418 + pax_exit_kernel_user
13419 TRACE_IRQS_IRETQ
13420 SWAPGS
13421 jmp restore_args
13422
13423 retint_restore_args: /* return to kernel space */
13424 DISABLE_INTERRUPTS(CLBR_ANY)
13425 + pax_exit_kernel
13426 /*
13427 * The iretq could re-enable interrupts:
13428 */
13429 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13430 CFI_ADJUST_CFA_OFFSET 15*8
13431 call error_entry
13432 DEFAULT_FRAME 0
13433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13434 + testb $3, CS(%rsp)
13435 + jnz 1f
13436 + pax_enter_kernel
13437 + jmp 2f
13438 +1: pax_enter_kernel_user
13439 +2:
13440 +#else
13441 + pax_enter_kernel
13442 +#endif
13443 movq %rsp,%rdi /* pt_regs pointer */
13444 xorl %esi,%esi /* no error code */
13445 call \do_sym
13446 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13447 subq $15*8, %rsp
13448 call save_paranoid
13449 TRACE_IRQS_OFF
13450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13451 + testb $3, CS(%rsp)
13452 + jnz 1f
13453 + pax_enter_kernel
13454 + jmp 2f
13455 +1: pax_enter_kernel_user
13456 +2:
13457 +#else
13458 + pax_enter_kernel
13459 +#endif
13460 movq %rsp,%rdi /* pt_regs pointer */
13461 xorl %esi,%esi /* no error code */
13462 call \do_sym
13463 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13464 subq $15*8, %rsp
13465 call save_paranoid
13466 TRACE_IRQS_OFF
13467 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13468 + testb $3, CS(%rsp)
13469 + jnz 1f
13470 + pax_enter_kernel
13471 + jmp 2f
13472 +1: pax_enter_kernel_user
13473 +2:
13474 +#else
13475 + pax_enter_kernel
13476 +#endif
13477 movq %rsp,%rdi /* pt_regs pointer */
13478 xorl %esi,%esi /* no error code */
13479 - PER_CPU(init_tss, %rbp)
13480 +#ifdef CONFIG_SMP
13481 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13482 + lea init_tss(%rbp), %rbp
13483 +#else
13484 + lea init_tss(%rip), %rbp
13485 +#endif
13486 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13487 call \do_sym
13488 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13489 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13490 CFI_ADJUST_CFA_OFFSET 15*8
13491 call error_entry
13492 DEFAULT_FRAME 0
13493 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13494 + testb $3, CS(%rsp)
13495 + jnz 1f
13496 + pax_enter_kernel
13497 + jmp 2f
13498 +1: pax_enter_kernel_user
13499 +2:
13500 +#else
13501 + pax_enter_kernel
13502 +#endif
13503 movq %rsp,%rdi /* pt_regs pointer */
13504 movq ORIG_RAX(%rsp),%rsi /* get error code */
13505 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13506 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13507 call save_paranoid
13508 DEFAULT_FRAME 0
13509 TRACE_IRQS_OFF
13510 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13511 + testb $3, CS(%rsp)
13512 + jnz 1f
13513 + pax_enter_kernel
13514 + jmp 2f
13515 +1: pax_enter_kernel_user
13516 +2:
13517 +#else
13518 + pax_enter_kernel
13519 +#endif
13520 movq %rsp,%rdi /* pt_regs pointer */
13521 movq ORIG_RAX(%rsp),%rsi /* get error code */
13522 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13523 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13524 TRACE_IRQS_OFF
13525 testl %ebx,%ebx /* swapgs needed? */
13526 jnz paranoid_restore
13527 - testl $3,CS(%rsp)
13528 + testb $3,CS(%rsp)
13529 jnz paranoid_userspace
13530 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13531 + pax_exit_kernel
13532 + TRACE_IRQS_IRETQ 0
13533 + SWAPGS_UNSAFE_STACK
13534 + RESTORE_ALL 8
13535 + jmp irq_return
13536 +#endif
13537 paranoid_swapgs:
13538 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13539 + pax_exit_kernel_user
13540 +#else
13541 + pax_exit_kernel
13542 +#endif
13543 TRACE_IRQS_IRETQ 0
13544 SWAPGS_UNSAFE_STACK
13545 RESTORE_ALL 8
13546 jmp irq_return
13547 paranoid_restore:
13548 + pax_exit_kernel
13549 TRACE_IRQS_IRETQ 0
13550 RESTORE_ALL 8
13551 jmp irq_return
13552 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13553 movq_cfi r14, R14+8
13554 movq_cfi r15, R15+8
13555 xorl %ebx,%ebx
13556 - testl $3,CS+8(%rsp)
13557 + testb $3,CS+8(%rsp)
13558 je error_kernelspace
13559 error_swapgs:
13560 SWAPGS
13561 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13562 CFI_ADJUST_CFA_OFFSET 15*8
13563 call save_paranoid
13564 DEFAULT_FRAME 0
13565 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13566 + testb $3, CS(%rsp)
13567 + jnz 1f
13568 + pax_enter_kernel
13569 + jmp 2f
13570 +1: pax_enter_kernel_user
13571 +2:
13572 +#else
13573 + pax_enter_kernel
13574 +#endif
13575 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13576 movq %rsp,%rdi
13577 movq $-1,%rsi
13578 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13579 DISABLE_INTERRUPTS(CLBR_NONE)
13580 testl %ebx,%ebx /* swapgs needed? */
13581 jnz nmi_restore
13582 - testl $3,CS(%rsp)
13583 + testb $3,CS(%rsp)
13584 jnz nmi_userspace
13585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13586 + pax_exit_kernel
13587 + SWAPGS_UNSAFE_STACK
13588 + RESTORE_ALL 8
13589 + jmp irq_return
13590 +#endif
13591 nmi_swapgs:
13592 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13593 + pax_exit_kernel_user
13594 +#else
13595 + pax_exit_kernel
13596 +#endif
13597 SWAPGS_UNSAFE_STACK
13598 + RESTORE_ALL 8
13599 + jmp irq_return
13600 nmi_restore:
13601 + pax_exit_kernel
13602 RESTORE_ALL 8
13603 jmp irq_return
13604 nmi_userspace:
13605 diff -urNp linux-2.6.32.41/arch/x86/kernel/ftrace.c linux-2.6.32.41/arch/x86/kernel/ftrace.c
13606 --- linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13607 +++ linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13608 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13609 static void *mod_code_newcode; /* holds the text to write to the IP */
13610
13611 static unsigned nmi_wait_count;
13612 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13613 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13614
13615 int ftrace_arch_read_dyn_info(char *buf, int size)
13616 {
13617 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13618
13619 r = snprintf(buf, size, "%u %u",
13620 nmi_wait_count,
13621 - atomic_read(&nmi_update_count));
13622 + atomic_read_unchecked(&nmi_update_count));
13623 return r;
13624 }
13625
13626 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13627 {
13628 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13629 smp_rmb();
13630 + pax_open_kernel();
13631 ftrace_mod_code();
13632 - atomic_inc(&nmi_update_count);
13633 + pax_close_kernel();
13634 + atomic_inc_unchecked(&nmi_update_count);
13635 }
13636 /* Must have previous changes seen before executions */
13637 smp_mb();
13638 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13639
13640
13641
13642 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13643 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13644
13645 static unsigned char *ftrace_nop_replace(void)
13646 {
13647 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13648 {
13649 unsigned char replaced[MCOUNT_INSN_SIZE];
13650
13651 + ip = ktla_ktva(ip);
13652 +
13653 /*
13654 * Note: Due to modules and __init, code can
13655 * disappear and change, we need to protect against faulting
13656 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13657 unsigned char old[MCOUNT_INSN_SIZE], *new;
13658 int ret;
13659
13660 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13661 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13662 new = ftrace_call_replace(ip, (unsigned long)func);
13663 ret = ftrace_modify_code(ip, old, new);
13664
13665 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13666 switch (faulted) {
13667 case 0:
13668 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13669 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13670 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13671 break;
13672 case 1:
13673 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13674 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13675 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13676 break;
13677 case 2:
13678 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13679 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13680 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13681 break;
13682 }
13683
13684 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13685 {
13686 unsigned char code[MCOUNT_INSN_SIZE];
13687
13688 + ip = ktla_ktva(ip);
13689 +
13690 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13691 return -EFAULT;
13692
13693 diff -urNp linux-2.6.32.41/arch/x86/kernel/head32.c linux-2.6.32.41/arch/x86/kernel/head32.c
13694 --- linux-2.6.32.41/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13695 +++ linux-2.6.32.41/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13696 @@ -16,6 +16,7 @@
13697 #include <asm/apic.h>
13698 #include <asm/io_apic.h>
13699 #include <asm/bios_ebda.h>
13700 +#include <asm/boot.h>
13701
13702 static void __init i386_default_early_setup(void)
13703 {
13704 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13705 {
13706 reserve_trampoline_memory();
13707
13708 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13709 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13710
13711 #ifdef CONFIG_BLK_DEV_INITRD
13712 /* Reserve INITRD */
13713 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_32.S linux-2.6.32.41/arch/x86/kernel/head_32.S
13714 --- linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13715 +++ linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13716 @@ -19,10 +19,17 @@
13717 #include <asm/setup.h>
13718 #include <asm/processor-flags.h>
13719 #include <asm/percpu.h>
13720 +#include <asm/msr-index.h>
13721
13722 /* Physical address */
13723 #define pa(X) ((X) - __PAGE_OFFSET)
13724
13725 +#ifdef CONFIG_PAX_KERNEXEC
13726 +#define ta(X) (X)
13727 +#else
13728 +#define ta(X) ((X) - __PAGE_OFFSET)
13729 +#endif
13730 +
13731 /*
13732 * References to members of the new_cpu_data structure.
13733 */
13734 @@ -52,11 +59,7 @@
13735 * and small than max_low_pfn, otherwise will waste some page table entries
13736 */
13737
13738 -#if PTRS_PER_PMD > 1
13739 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13740 -#else
13741 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13742 -#endif
13743 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13744
13745 /* Enough space to fit pagetables for the low memory linear map */
13746 MAPPING_BEYOND_END = \
13747 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13748 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13749
13750 /*
13751 + * Real beginning of normal "text" segment
13752 + */
13753 +ENTRY(stext)
13754 +ENTRY(_stext)
13755 +
13756 +/*
13757 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13758 * %esi points to the real-mode code as a 32-bit pointer.
13759 * CS and DS must be 4 GB flat segments, but we don't depend on
13760 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13761 * can.
13762 */
13763 __HEAD
13764 +
13765 +#ifdef CONFIG_PAX_KERNEXEC
13766 + jmp startup_32
13767 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13768 +.fill PAGE_SIZE-5,1,0xcc
13769 +#endif
13770 +
13771 ENTRY(startup_32)
13772 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13773 us to not reload segments */
13774 @@ -97,6 +113,57 @@ ENTRY(startup_32)
13775 movl %eax,%gs
13776 2:
13777
13778 +#ifdef CONFIG_SMP
13779 + movl $pa(cpu_gdt_table),%edi
13780 + movl $__per_cpu_load,%eax
13781 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13782 + rorl $16,%eax
13783 + movb %al,__KERNEL_PERCPU + 4(%edi)
13784 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13785 + movl $__per_cpu_end - 1,%eax
13786 + subl $__per_cpu_start,%eax
13787 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13788 +#endif
13789 +
13790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13791 + movl $NR_CPUS,%ecx
13792 + movl $pa(cpu_gdt_table),%edi
13793 +1:
13794 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13795 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13796 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13797 + addl $PAGE_SIZE_asm,%edi
13798 + loop 1b
13799 +#endif
13800 +
13801 +#ifdef CONFIG_PAX_KERNEXEC
13802 + movl $pa(boot_gdt),%edi
13803 + movl $__LOAD_PHYSICAL_ADDR,%eax
13804 + movw %ax,__BOOT_CS + 2(%edi)
13805 + rorl $16,%eax
13806 + movb %al,__BOOT_CS + 4(%edi)
13807 + movb %ah,__BOOT_CS + 7(%edi)
13808 + rorl $16,%eax
13809 +
13810 + ljmp $(__BOOT_CS),$1f
13811 +1:
13812 +
13813 + movl $NR_CPUS,%ecx
13814 + movl $pa(cpu_gdt_table),%edi
13815 + addl $__PAGE_OFFSET,%eax
13816 +1:
13817 + movw %ax,__KERNEL_CS + 2(%edi)
13818 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13819 + rorl $16,%eax
13820 + movb %al,__KERNEL_CS + 4(%edi)
13821 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13822 + movb %ah,__KERNEL_CS + 7(%edi)
13823 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13824 + rorl $16,%eax
13825 + addl $PAGE_SIZE_asm,%edi
13826 + loop 1b
13827 +#endif
13828 +
13829 /*
13830 * Clear BSS first so that there are no surprises...
13831 */
13832 @@ -140,9 +207,7 @@ ENTRY(startup_32)
13833 cmpl $num_subarch_entries, %eax
13834 jae bad_subarch
13835
13836 - movl pa(subarch_entries)(,%eax,4), %eax
13837 - subl $__PAGE_OFFSET, %eax
13838 - jmp *%eax
13839 + jmp *pa(subarch_entries)(,%eax,4)
13840
13841 bad_subarch:
13842 WEAK(lguest_entry)
13843 @@ -154,10 +219,10 @@ WEAK(xen_entry)
13844 __INITDATA
13845
13846 subarch_entries:
13847 - .long default_entry /* normal x86/PC */
13848 - .long lguest_entry /* lguest hypervisor */
13849 - .long xen_entry /* Xen hypervisor */
13850 - .long default_entry /* Moorestown MID */
13851 + .long ta(default_entry) /* normal x86/PC */
13852 + .long ta(lguest_entry) /* lguest hypervisor */
13853 + .long ta(xen_entry) /* Xen hypervisor */
13854 + .long ta(default_entry) /* Moorestown MID */
13855 num_subarch_entries = (. - subarch_entries) / 4
13856 .previous
13857 #endif /* CONFIG_PARAVIRT */
13858 @@ -218,8 +283,11 @@ default_entry:
13859 movl %eax, pa(max_pfn_mapped)
13860
13861 /* Do early initialization of the fixmap area */
13862 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13863 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13864 +#ifdef CONFIG_COMPAT_VDSO
13865 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13866 +#else
13867 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13868 +#endif
13869 #else /* Not PAE */
13870
13871 page_pde_offset = (__PAGE_OFFSET >> 20);
13872 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13873 movl %eax, pa(max_pfn_mapped)
13874
13875 /* Do early initialization of the fixmap area */
13876 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13877 - movl %eax,pa(swapper_pg_dir+0xffc)
13878 +#ifdef CONFIG_COMPAT_VDSO
13879 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13880 +#else
13881 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13882 +#endif
13883 #endif
13884 jmp 3f
13885 /*
13886 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13887 orl %edx,%eax
13888 movl %eax,%cr4
13889
13890 +#ifdef CONFIG_X86_PAE
13891 btl $5, %eax # check if PAE is enabled
13892 jnc 6f
13893
13894 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13895 jnc 6f
13896
13897 /* Setup EFER (Extended Feature Enable Register) */
13898 - movl $0xc0000080, %ecx
13899 + movl $MSR_EFER, %ecx
13900 rdmsr
13901
13902 btsl $11, %eax
13903 /* Make changes effective */
13904 wrmsr
13905
13906 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13907 + movl $1,pa(nx_enabled)
13908 +#endif
13909 +
13910 6:
13911
13912 /*
13913 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13914
13915 #ifdef CONFIG_SMP
13916 cmpb $0, ready
13917 - jz 1f /* Initial CPU cleans BSS */
13918 - jmp checkCPUtype
13919 -1:
13920 + jnz checkCPUtype /* Initial CPU cleans BSS */
13921 #endif /* CONFIG_SMP */
13922
13923 /*
13924 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
13925 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13926 movl %eax,%ss # after changing gdt.
13927
13928 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
13929 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13930 movl %eax,%ds
13931 movl %eax,%es
13932
13933 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
13934 */
13935 cmpb $0,ready
13936 jne 1f
13937 - movl $per_cpu__gdt_page,%eax
13938 + movl $cpu_gdt_table,%eax
13939 movl $per_cpu__stack_canary,%ecx
13940 +#ifdef CONFIG_SMP
13941 + addl $__per_cpu_load,%ecx
13942 +#endif
13943 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13944 shrl $16, %ecx
13945 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13946 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13947 1:
13948 -#endif
13949 movl $(__KERNEL_STACK_CANARY),%eax
13950 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13951 + movl $(__USER_DS),%eax
13952 +#else
13953 + xorl %eax,%eax
13954 +#endif
13955 movl %eax,%gs
13956
13957 xorl %eax,%eax # Clear LDT
13958 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
13959 #ifdef CONFIG_SMP
13960 movb ready, %cl
13961 movb $1, ready
13962 - cmpb $0,%cl # the first CPU calls start_kernel
13963 - je 1f
13964 - movl (stack_start), %esp
13965 -1:
13966 #endif /* CONFIG_SMP */
13967 jmp *(initial_code)
13968
13969 @@ -546,22 +623,22 @@ early_page_fault:
13970 jmp early_fault
13971
13972 early_fault:
13973 - cld
13974 #ifdef CONFIG_PRINTK
13975 + cmpl $1,%ss:early_recursion_flag
13976 + je hlt_loop
13977 + incl %ss:early_recursion_flag
13978 + cld
13979 pusha
13980 movl $(__KERNEL_DS),%eax
13981 movl %eax,%ds
13982 movl %eax,%es
13983 - cmpl $2,early_recursion_flag
13984 - je hlt_loop
13985 - incl early_recursion_flag
13986 movl %cr2,%eax
13987 pushl %eax
13988 pushl %edx /* trapno */
13989 pushl $fault_msg
13990 call printk
13991 +; call dump_stack
13992 #endif
13993 - call dump_stack
13994 hlt_loop:
13995 hlt
13996 jmp hlt_loop
13997 @@ -569,8 +646,11 @@ hlt_loop:
13998 /* This is the default interrupt "handler" :-) */
13999 ALIGN
14000 ignore_int:
14001 - cld
14002 #ifdef CONFIG_PRINTK
14003 + cmpl $2,%ss:early_recursion_flag
14004 + je hlt_loop
14005 + incl %ss:early_recursion_flag
14006 + cld
14007 pushl %eax
14008 pushl %ecx
14009 pushl %edx
14010 @@ -579,9 +659,6 @@ ignore_int:
14011 movl $(__KERNEL_DS),%eax
14012 movl %eax,%ds
14013 movl %eax,%es
14014 - cmpl $2,early_recursion_flag
14015 - je hlt_loop
14016 - incl early_recursion_flag
14017 pushl 16(%esp)
14018 pushl 24(%esp)
14019 pushl 32(%esp)
14020 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
14021 /*
14022 * BSS section
14023 */
14024 -__PAGE_ALIGNED_BSS
14025 - .align PAGE_SIZE_asm
14026 #ifdef CONFIG_X86_PAE
14027 +.section .swapper_pg_pmd,"a",@progbits
14028 swapper_pg_pmd:
14029 .fill 1024*KPMDS,4,0
14030 #else
14031 +.section .swapper_pg_dir,"a",@progbits
14032 ENTRY(swapper_pg_dir)
14033 .fill 1024,4,0
14034 #endif
14035 +.section .swapper_pg_fixmap,"a",@progbits
14036 swapper_pg_fixmap:
14037 .fill 1024,4,0
14038 #ifdef CONFIG_X86_TRAMPOLINE
14039 +.section .trampoline_pg_dir,"a",@progbits
14040 ENTRY(trampoline_pg_dir)
14041 +#ifdef CONFIG_X86_PAE
14042 + .fill 4,8,0
14043 +#else
14044 .fill 1024,4,0
14045 #endif
14046 +#endif
14047 +
14048 +.section .empty_zero_page,"a",@progbits
14049 ENTRY(empty_zero_page)
14050 .fill 4096,1,0
14051
14052 /*
14053 + * The IDT has to be page-aligned to simplify the Pentium
14054 + * F0 0F bug workaround.. We have a special link segment
14055 + * for this.
14056 + */
14057 +.section .idt,"a",@progbits
14058 +ENTRY(idt_table)
14059 + .fill 256,8,0
14060 +
14061 +/*
14062 * This starts the data section.
14063 */
14064 #ifdef CONFIG_X86_PAE
14065 -__PAGE_ALIGNED_DATA
14066 - /* Page-aligned for the benefit of paravirt? */
14067 - .align PAGE_SIZE_asm
14068 +.section .swapper_pg_dir,"a",@progbits
14069 +
14070 ENTRY(swapper_pg_dir)
14071 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14072 # if KPMDS == 3
14073 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14074 # error "Kernel PMDs should be 1, 2 or 3"
14075 # endif
14076 .align PAGE_SIZE_asm /* needs to be page-sized too */
14077 +
14078 +#ifdef CONFIG_PAX_PER_CPU_PGD
14079 +ENTRY(cpu_pgd)
14080 + .rept NR_CPUS
14081 + .fill 4,8,0
14082 + .endr
14083 +#endif
14084 +
14085 #endif
14086
14087 .data
14088 ENTRY(stack_start)
14089 - .long init_thread_union+THREAD_SIZE
14090 + .long init_thread_union+THREAD_SIZE-8
14091 .long __BOOT_DS
14092
14093 ready: .byte 0
14094
14095 +.section .rodata,"a",@progbits
14096 early_recursion_flag:
14097 .long 0
14098
14099 @@ -697,7 +799,7 @@ fault_msg:
14100 .word 0 # 32 bit align gdt_desc.address
14101 boot_gdt_descr:
14102 .word __BOOT_DS+7
14103 - .long boot_gdt - __PAGE_OFFSET
14104 + .long pa(boot_gdt)
14105
14106 .word 0 # 32-bit align idt_desc.address
14107 idt_descr:
14108 @@ -708,7 +810,7 @@ idt_descr:
14109 .word 0 # 32 bit align gdt_desc.address
14110 ENTRY(early_gdt_descr)
14111 .word GDT_ENTRIES*8-1
14112 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14113 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14114
14115 /*
14116 * The boot_gdt must mirror the equivalent in setup.S and is
14117 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14118 .align L1_CACHE_BYTES
14119 ENTRY(boot_gdt)
14120 .fill GDT_ENTRY_BOOT_CS,8,0
14121 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14122 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14123 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14124 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14125 +
14126 + .align PAGE_SIZE_asm
14127 +ENTRY(cpu_gdt_table)
14128 + .rept NR_CPUS
14129 + .quad 0x0000000000000000 /* NULL descriptor */
14130 + .quad 0x0000000000000000 /* 0x0b reserved */
14131 + .quad 0x0000000000000000 /* 0x13 reserved */
14132 + .quad 0x0000000000000000 /* 0x1b reserved */
14133 +
14134 +#ifdef CONFIG_PAX_KERNEXEC
14135 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14136 +#else
14137 + .quad 0x0000000000000000 /* 0x20 unused */
14138 +#endif
14139 +
14140 + .quad 0x0000000000000000 /* 0x28 unused */
14141 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14142 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14143 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14144 + .quad 0x0000000000000000 /* 0x4b reserved */
14145 + .quad 0x0000000000000000 /* 0x53 reserved */
14146 + .quad 0x0000000000000000 /* 0x5b reserved */
14147 +
14148 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14149 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14150 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14151 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14152 +
14153 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14154 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14155 +
14156 + /*
14157 + * Segments used for calling PnP BIOS have byte granularity.
14158 + * The code segments and data segments have fixed 64k limits,
14159 + * the transfer segment sizes are set at run time.
14160 + */
14161 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14162 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14163 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14164 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14165 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14166 +
14167 + /*
14168 + * The APM segments have byte granularity and their bases
14169 + * are set at run time. All have 64k limits.
14170 + */
14171 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14172 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14173 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14174 +
14175 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14176 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14177 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14178 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14179 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14180 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14181 +
14182 + /* Be sure this is zeroed to avoid false validations in Xen */
14183 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14184 + .endr
14185 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_64.S linux-2.6.32.41/arch/x86/kernel/head_64.S
14186 --- linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14187 +++ linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14188 @@ -19,6 +19,7 @@
14189 #include <asm/cache.h>
14190 #include <asm/processor-flags.h>
14191 #include <asm/percpu.h>
14192 +#include <asm/cpufeature.h>
14193
14194 #ifdef CONFIG_PARAVIRT
14195 #include <asm/asm-offsets.h>
14196 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14197 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14198 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14199 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14200 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14201 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14202 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14203 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14204
14205 .text
14206 __HEAD
14207 @@ -85,35 +90,22 @@ startup_64:
14208 */
14209 addq %rbp, init_level4_pgt + 0(%rip)
14210 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14211 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14212 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14213 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14214
14215 addq %rbp, level3_ident_pgt + 0(%rip)
14216 +#ifndef CONFIG_XEN
14217 + addq %rbp, level3_ident_pgt + 8(%rip)
14218 +#endif
14219
14220 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14221 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14222 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14223
14224 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14225 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14226 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14227
14228 - /* Add an Identity mapping if I am above 1G */
14229 - leaq _text(%rip), %rdi
14230 - andq $PMD_PAGE_MASK, %rdi
14231 -
14232 - movq %rdi, %rax
14233 - shrq $PUD_SHIFT, %rax
14234 - andq $(PTRS_PER_PUD - 1), %rax
14235 - jz ident_complete
14236 -
14237 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14238 - leaq level3_ident_pgt(%rip), %rbx
14239 - movq %rdx, 0(%rbx, %rax, 8)
14240 -
14241 - movq %rdi, %rax
14242 - shrq $PMD_SHIFT, %rax
14243 - andq $(PTRS_PER_PMD - 1), %rax
14244 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14245 - leaq level2_spare_pgt(%rip), %rbx
14246 - movq %rdx, 0(%rbx, %rax, 8)
14247 -ident_complete:
14248 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14249 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14250
14251 /*
14252 * Fixup the kernel text+data virtual addresses. Note that
14253 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14254 * after the boot processor executes this code.
14255 */
14256
14257 - /* Enable PAE mode and PGE */
14258 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14259 + /* Enable PAE mode and PSE/PGE */
14260 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14261 movq %rax, %cr4
14262
14263 /* Setup early boot stage 4 level pagetables. */
14264 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14265 movl $MSR_EFER, %ecx
14266 rdmsr
14267 btsl $_EFER_SCE, %eax /* Enable System Call */
14268 - btl $20,%edi /* No Execute supported? */
14269 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14270 jnc 1f
14271 btsl $_EFER_NX, %eax
14272 + leaq init_level4_pgt(%rip), %rdi
14273 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14274 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14275 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14276 1: wrmsr /* Make changes effective */
14277
14278 /* Setup cr0 */
14279 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14280 .quad x86_64_start_kernel
14281 ENTRY(initial_gs)
14282 .quad INIT_PER_CPU_VAR(irq_stack_union)
14283 - __FINITDATA
14284
14285 ENTRY(stack_start)
14286 .quad init_thread_union+THREAD_SIZE-8
14287 .word 0
14288 + __FINITDATA
14289
14290 bad_address:
14291 jmp bad_address
14292
14293 - .section ".init.text","ax"
14294 + __INIT
14295 #ifdef CONFIG_EARLY_PRINTK
14296 .globl early_idt_handlers
14297 early_idt_handlers:
14298 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14299 #endif /* EARLY_PRINTK */
14300 1: hlt
14301 jmp 1b
14302 + .previous
14303
14304 #ifdef CONFIG_EARLY_PRINTK
14305 + __INITDATA
14306 early_recursion_flag:
14307 .long 0
14308 + .previous
14309
14310 + .section .rodata,"a",@progbits
14311 early_idt_msg:
14312 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14313 early_idt_ripmsg:
14314 .asciz "RIP %s\n"
14315 -#endif /* CONFIG_EARLY_PRINTK */
14316 .previous
14317 +#endif /* CONFIG_EARLY_PRINTK */
14318
14319 + .section .rodata,"a",@progbits
14320 #define NEXT_PAGE(name) \
14321 .balign PAGE_SIZE; \
14322 ENTRY(name)
14323 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14324 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14325 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14326 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14327 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14328 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14329 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14330 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14331 .org init_level4_pgt + L4_START_KERNEL*8, 0
14332 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14333 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14334
14335 +#ifdef CONFIG_PAX_PER_CPU_PGD
14336 +NEXT_PAGE(cpu_pgd)
14337 + .rept NR_CPUS
14338 + .fill 512,8,0
14339 + .endr
14340 +#endif
14341 +
14342 NEXT_PAGE(level3_ident_pgt)
14343 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14344 +#ifdef CONFIG_XEN
14345 .fill 511,8,0
14346 +#else
14347 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14348 + .fill 510,8,0
14349 +#endif
14350 +
14351 +NEXT_PAGE(level3_vmalloc_pgt)
14352 + .fill 512,8,0
14353 +
14354 +NEXT_PAGE(level3_vmemmap_pgt)
14355 + .fill L3_VMEMMAP_START,8,0
14356 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14357
14358 NEXT_PAGE(level3_kernel_pgt)
14359 .fill L3_START_KERNEL,8,0
14360 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14361 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14362 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14363
14364 +NEXT_PAGE(level2_vmemmap_pgt)
14365 + .fill 512,8,0
14366 +
14367 NEXT_PAGE(level2_fixmap_pgt)
14368 - .fill 506,8,0
14369 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14370 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14371 - .fill 5,8,0
14372 + .fill 507,8,0
14373 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14374 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14375 + .fill 4,8,0
14376
14377 -NEXT_PAGE(level1_fixmap_pgt)
14378 +NEXT_PAGE(level1_vsyscall_pgt)
14379 .fill 512,8,0
14380
14381 -NEXT_PAGE(level2_ident_pgt)
14382 - /* Since I easily can, map the first 1G.
14383 + /* Since I easily can, map the first 2G.
14384 * Don't set NX because code runs from these pages.
14385 */
14386 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14387 +NEXT_PAGE(level2_ident_pgt)
14388 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14389
14390 NEXT_PAGE(level2_kernel_pgt)
14391 /*
14392 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14393 * If you want to increase this then increase MODULES_VADDR
14394 * too.)
14395 */
14396 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14397 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14398 -
14399 -NEXT_PAGE(level2_spare_pgt)
14400 - .fill 512, 8, 0
14401 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14402
14403 #undef PMDS
14404 #undef NEXT_PAGE
14405
14406 - .data
14407 + .align PAGE_SIZE
14408 +ENTRY(cpu_gdt_table)
14409 + .rept NR_CPUS
14410 + .quad 0x0000000000000000 /* NULL descriptor */
14411 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14412 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14413 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14414 + .quad 0x00cffb000000ffff /* __USER32_CS */
14415 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14416 + .quad 0x00affb000000ffff /* __USER_CS */
14417 +
14418 +#ifdef CONFIG_PAX_KERNEXEC
14419 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14420 +#else
14421 + .quad 0x0 /* unused */
14422 +#endif
14423 +
14424 + .quad 0,0 /* TSS */
14425 + .quad 0,0 /* LDT */
14426 + .quad 0,0,0 /* three TLS descriptors */
14427 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14428 + /* asm/segment.h:GDT_ENTRIES must match this */
14429 +
14430 + /* zero the remaining page */
14431 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14432 + .endr
14433 +
14434 .align 16
14435 .globl early_gdt_descr
14436 early_gdt_descr:
14437 .word GDT_ENTRIES*8-1
14438 early_gdt_descr_base:
14439 - .quad INIT_PER_CPU_VAR(gdt_page)
14440 + .quad cpu_gdt_table
14441
14442 ENTRY(phys_base)
14443 /* This must match the first entry in level2_kernel_pgt */
14444 .quad 0x0000000000000000
14445
14446 #include "../../x86/xen/xen-head.S"
14447 -
14448 - .section .bss, "aw", @nobits
14449 +
14450 + .section .rodata,"a",@progbits
14451 .align L1_CACHE_BYTES
14452 ENTRY(idt_table)
14453 - .skip IDT_ENTRIES * 16
14454 + .fill 512,8,0
14455
14456 __PAGE_ALIGNED_BSS
14457 .align PAGE_SIZE
14458 diff -urNp linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c
14459 --- linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14460 +++ linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14461 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14462 EXPORT_SYMBOL(cmpxchg8b_emu);
14463 #endif
14464
14465 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14466 +
14467 /* Networking helper routines. */
14468 EXPORT_SYMBOL(csum_partial_copy_generic);
14469 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14470 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14471
14472 EXPORT_SYMBOL(__get_user_1);
14473 EXPORT_SYMBOL(__get_user_2);
14474 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14475
14476 EXPORT_SYMBOL(csum_partial);
14477 EXPORT_SYMBOL(empty_zero_page);
14478 +
14479 +#ifdef CONFIG_PAX_KERNEXEC
14480 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14481 +#endif
14482 diff -urNp linux-2.6.32.41/arch/x86/kernel/i8259.c linux-2.6.32.41/arch/x86/kernel/i8259.c
14483 --- linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14484 +++ linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14485 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14486 "spurious 8259A interrupt: IRQ%d.\n", irq);
14487 spurious_irq_mask |= irqmask;
14488 }
14489 - atomic_inc(&irq_err_count);
14490 + atomic_inc_unchecked(&irq_err_count);
14491 /*
14492 * Theoretically we do not have to handle this IRQ,
14493 * but in Linux this does not cause problems and is
14494 diff -urNp linux-2.6.32.41/arch/x86/kernel/init_task.c linux-2.6.32.41/arch/x86/kernel/init_task.c
14495 --- linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14496 +++ linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14497 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14498 * way process stacks are handled. This is done by having a special
14499 * "init_task" linker map entry..
14500 */
14501 -union thread_union init_thread_union __init_task_data =
14502 - { INIT_THREAD_INFO(init_task) };
14503 +union thread_union init_thread_union __init_task_data;
14504
14505 /*
14506 * Initial task structure.
14507 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14508 * section. Since TSS's are completely CPU-local, we want them
14509 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14510 */
14511 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14512 -
14513 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14514 +EXPORT_SYMBOL(init_tss);
14515 diff -urNp linux-2.6.32.41/arch/x86/kernel/ioport.c linux-2.6.32.41/arch/x86/kernel/ioport.c
14516 --- linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14517 +++ linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14518 @@ -6,6 +6,7 @@
14519 #include <linux/sched.h>
14520 #include <linux/kernel.h>
14521 #include <linux/capability.h>
14522 +#include <linux/security.h>
14523 #include <linux/errno.h>
14524 #include <linux/types.h>
14525 #include <linux/ioport.h>
14526 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14527
14528 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14529 return -EINVAL;
14530 +#ifdef CONFIG_GRKERNSEC_IO
14531 + if (turn_on && grsec_disable_privio) {
14532 + gr_handle_ioperm();
14533 + return -EPERM;
14534 + }
14535 +#endif
14536 if (turn_on && !capable(CAP_SYS_RAWIO))
14537 return -EPERM;
14538
14539 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14540 * because the ->io_bitmap_max value must match the bitmap
14541 * contents:
14542 */
14543 - tss = &per_cpu(init_tss, get_cpu());
14544 + tss = init_tss + get_cpu();
14545
14546 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14547
14548 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14549 return -EINVAL;
14550 /* Trying to gain more privileges? */
14551 if (level > old) {
14552 +#ifdef CONFIG_GRKERNSEC_IO
14553 + if (grsec_disable_privio) {
14554 + gr_handle_iopl();
14555 + return -EPERM;
14556 + }
14557 +#endif
14558 if (!capable(CAP_SYS_RAWIO))
14559 return -EPERM;
14560 }
14561 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq_32.c linux-2.6.32.41/arch/x86/kernel/irq_32.c
14562 --- linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14563 +++ linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14564 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14565 __asm__ __volatile__("andl %%esp,%0" :
14566 "=r" (sp) : "0" (THREAD_SIZE - 1));
14567
14568 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14569 + return sp < STACK_WARN;
14570 }
14571
14572 static void print_stack_overflow(void)
14573 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14574 * per-CPU IRQ handling contexts (thread information and stack)
14575 */
14576 union irq_ctx {
14577 - struct thread_info tinfo;
14578 - u32 stack[THREAD_SIZE/sizeof(u32)];
14579 -} __attribute__((aligned(PAGE_SIZE)));
14580 + unsigned long previous_esp;
14581 + u32 stack[THREAD_SIZE/sizeof(u32)];
14582 +} __attribute__((aligned(THREAD_SIZE)));
14583
14584 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14585 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14586 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14587 static inline int
14588 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14589 {
14590 - union irq_ctx *curctx, *irqctx;
14591 + union irq_ctx *irqctx;
14592 u32 *isp, arg1, arg2;
14593
14594 - curctx = (union irq_ctx *) current_thread_info();
14595 irqctx = __get_cpu_var(hardirq_ctx);
14596
14597 /*
14598 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14599 * handler) we can't do that and just have to keep using the
14600 * current stack (which is the irq stack already after all)
14601 */
14602 - if (unlikely(curctx == irqctx))
14603 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14604 return 0;
14605
14606 /* build the stack frame on the IRQ stack */
14607 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14608 - irqctx->tinfo.task = curctx->tinfo.task;
14609 - irqctx->tinfo.previous_esp = current_stack_pointer;
14610 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14611 + irqctx->previous_esp = current_stack_pointer;
14612 + add_preempt_count(HARDIRQ_OFFSET);
14613
14614 - /*
14615 - * Copy the softirq bits in preempt_count so that the
14616 - * softirq checks work in the hardirq context.
14617 - */
14618 - irqctx->tinfo.preempt_count =
14619 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14620 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14622 + __set_fs(MAKE_MM_SEG(0));
14623 +#endif
14624
14625 if (unlikely(overflow))
14626 call_on_stack(print_stack_overflow, isp);
14627 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14628 : "0" (irq), "1" (desc), "2" (isp),
14629 "D" (desc->handle_irq)
14630 : "memory", "cc", "ecx");
14631 +
14632 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14633 + __set_fs(current_thread_info()->addr_limit);
14634 +#endif
14635 +
14636 + sub_preempt_count(HARDIRQ_OFFSET);
14637 return 1;
14638 }
14639
14640 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14641 */
14642 void __cpuinit irq_ctx_init(int cpu)
14643 {
14644 - union irq_ctx *irqctx;
14645 -
14646 if (per_cpu(hardirq_ctx, cpu))
14647 return;
14648
14649 - irqctx = &per_cpu(hardirq_stack, cpu);
14650 - irqctx->tinfo.task = NULL;
14651 - irqctx->tinfo.exec_domain = NULL;
14652 - irqctx->tinfo.cpu = cpu;
14653 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14654 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14655 -
14656 - per_cpu(hardirq_ctx, cpu) = irqctx;
14657 -
14658 - irqctx = &per_cpu(softirq_stack, cpu);
14659 - irqctx->tinfo.task = NULL;
14660 - irqctx->tinfo.exec_domain = NULL;
14661 - irqctx->tinfo.cpu = cpu;
14662 - irqctx->tinfo.preempt_count = 0;
14663 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14664 -
14665 - per_cpu(softirq_ctx, cpu) = irqctx;
14666 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14667 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14668
14669 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14670 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14671 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14672 asmlinkage void do_softirq(void)
14673 {
14674 unsigned long flags;
14675 - struct thread_info *curctx;
14676 union irq_ctx *irqctx;
14677 u32 *isp;
14678
14679 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14680 local_irq_save(flags);
14681
14682 if (local_softirq_pending()) {
14683 - curctx = current_thread_info();
14684 irqctx = __get_cpu_var(softirq_ctx);
14685 - irqctx->tinfo.task = curctx->task;
14686 - irqctx->tinfo.previous_esp = current_stack_pointer;
14687 + irqctx->previous_esp = current_stack_pointer;
14688
14689 /* build the stack frame on the softirq stack */
14690 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14691 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14692 +
14693 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14694 + __set_fs(MAKE_MM_SEG(0));
14695 +#endif
14696
14697 call_on_stack(__do_softirq, isp);
14698 +
14699 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14700 + __set_fs(current_thread_info()->addr_limit);
14701 +#endif
14702 +
14703 /*
14704 	 * Shouldn't happen, we returned above if in_interrupt():
14705 */
14706 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq.c linux-2.6.32.41/arch/x86/kernel/irq.c
14707 --- linux-2.6.32.41/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14708 +++ linux-2.6.32.41/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14709 @@ -15,7 +15,7 @@
14710 #include <asm/mce.h>
14711 #include <asm/hw_irq.h>
14712
14713 -atomic_t irq_err_count;
14714 +atomic_unchecked_t irq_err_count;
14715
14716 /* Function pointer for generic interrupt vector handling */
14717 void (*generic_interrupt_extension)(void) = NULL;
14718 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14719 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14720 seq_printf(p, " Machine check polls\n");
14721 #endif
14722 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14723 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14724 #if defined(CONFIG_X86_IO_APIC)
14725 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14726 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14727 #endif
14728 return 0;
14729 }
14730 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14731
14732 u64 arch_irq_stat(void)
14733 {
14734 - u64 sum = atomic_read(&irq_err_count);
14735 + u64 sum = atomic_read_unchecked(&irq_err_count);
14736
14737 #ifdef CONFIG_X86_IO_APIC
14738 - sum += atomic_read(&irq_mis_count);
14739 + sum += atomic_read_unchecked(&irq_mis_count);
14740 #endif
14741 return sum;
14742 }
14743 diff -urNp linux-2.6.32.41/arch/x86/kernel/kgdb.c linux-2.6.32.41/arch/x86/kernel/kgdb.c
14744 --- linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14745 +++ linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14746 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14747
14748 /* clear the trace bit */
14749 linux_regs->flags &= ~X86_EFLAGS_TF;
14750 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14751 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14752
14753 /* set the trace bit if we're stepping */
14754 if (remcomInBuffer[0] == 's') {
14755 linux_regs->flags |= X86_EFLAGS_TF;
14756 kgdb_single_step = 1;
14757 - atomic_set(&kgdb_cpu_doing_single_step,
14758 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14759 raw_smp_processor_id());
14760 }
14761
14762 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14763 break;
14764
14765 case DIE_DEBUG:
14766 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
14767 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14768 raw_smp_processor_id()) {
14769 if (user_mode(regs))
14770 return single_step_cont(regs, args);
14771 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14772 return instruction_pointer(regs);
14773 }
14774
14775 -struct kgdb_arch arch_kgdb_ops = {
14776 +const struct kgdb_arch arch_kgdb_ops = {
14777 /* Breakpoint instruction: */
14778 .gdb_bpt_instr = { 0xcc },
14779 .flags = KGDB_HW_BREAKPOINT,
14780 diff -urNp linux-2.6.32.41/arch/x86/kernel/kprobes.c linux-2.6.32.41/arch/x86/kernel/kprobes.c
14781 --- linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14782 +++ linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14783 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14784 char op;
14785 s32 raddr;
14786 } __attribute__((packed)) * jop;
14787 - jop = (struct __arch_jmp_op *)from;
14788 +
14789 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14790 +
14791 + pax_open_kernel();
14792 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14793 jop->op = RELATIVEJUMP_INSTRUCTION;
14794 + pax_close_kernel();
14795 }
14796
14797 /*
14798 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14799 kprobe_opcode_t opcode;
14800 kprobe_opcode_t *orig_opcodes = opcodes;
14801
14802 - if (search_exception_tables((unsigned long)opcodes))
14803 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14804 return 0; /* Page fault may occur on this address. */
14805
14806 retry:
14807 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14808 disp = (u8 *) p->addr + *((s32 *) insn) -
14809 (u8 *) p->ainsn.insn;
14810 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14811 + pax_open_kernel();
14812 *(s32 *)insn = (s32) disp;
14813 + pax_close_kernel();
14814 }
14815 }
14816 #endif
14817 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14818
14819 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14820 {
14821 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14822 + pax_open_kernel();
14823 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14824 + pax_close_kernel();
14825
14826 fix_riprel(p);
14827
14828 - if (can_boost(p->addr))
14829 + if (can_boost(ktla_ktva(p->addr)))
14830 p->ainsn.boostable = 0;
14831 else
14832 p->ainsn.boostable = -1;
14833
14834 - p->opcode = *p->addr;
14835 + p->opcode = *(ktla_ktva(p->addr));
14836 }
14837
14838 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14839 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14840 if (p->opcode == BREAKPOINT_INSTRUCTION)
14841 regs->ip = (unsigned long)p->addr;
14842 else
14843 - regs->ip = (unsigned long)p->ainsn.insn;
14844 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14845 }
14846
14847 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14848 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14849 if (p->ainsn.boostable == 1 && !p->post_handler) {
14850 /* Boost up -- we can execute copied instructions directly */
14851 reset_current_kprobe();
14852 - regs->ip = (unsigned long)p->ainsn.insn;
14853 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14854 preempt_enable_no_resched();
14855 return;
14856 }
14857 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14858 struct kprobe_ctlblk *kcb;
14859
14860 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14861 - if (*addr != BREAKPOINT_INSTRUCTION) {
14862 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14863 /*
14864 * The breakpoint instruction was removed right
14865 * after we hit it. Another cpu has removed
14866 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14867 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14868 {
14869 unsigned long *tos = stack_addr(regs);
14870 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14871 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14872 unsigned long orig_ip = (unsigned long)p->addr;
14873 kprobe_opcode_t *insn = p->ainsn.insn;
14874
14875 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14876 struct die_args *args = data;
14877 int ret = NOTIFY_DONE;
14878
14879 - if (args->regs && user_mode_vm(args->regs))
14880 + if (args->regs && user_mode(args->regs))
14881 return ret;
14882
14883 switch (val) {
14884 diff -urNp linux-2.6.32.41/arch/x86/kernel/ldt.c linux-2.6.32.41/arch/x86/kernel/ldt.c
14885 --- linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14886 +++ linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14887 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14888 if (reload) {
14889 #ifdef CONFIG_SMP
14890 preempt_disable();
14891 - load_LDT(pc);
14892 + load_LDT_nolock(pc);
14893 if (!cpumask_equal(mm_cpumask(current->mm),
14894 cpumask_of(smp_processor_id())))
14895 smp_call_function(flush_ldt, current->mm, 1);
14896 preempt_enable();
14897 #else
14898 - load_LDT(pc);
14899 + load_LDT_nolock(pc);
14900 #endif
14901 }
14902 if (oldsize) {
14903 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14904 return err;
14905
14906 for (i = 0; i < old->size; i++)
14907 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14908 + write_ldt_entry(new->ldt, i, old->ldt + i);
14909 return 0;
14910 }
14911
14912 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14913 retval = copy_ldt(&mm->context, &old_mm->context);
14914 mutex_unlock(&old_mm->context.lock);
14915 }
14916 +
14917 + if (tsk == current) {
14918 + mm->context.vdso = 0;
14919 +
14920 +#ifdef CONFIG_X86_32
14921 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14922 + mm->context.user_cs_base = 0UL;
14923 + mm->context.user_cs_limit = ~0UL;
14924 +
14925 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14926 + cpus_clear(mm->context.cpu_user_cs_mask);
14927 +#endif
14928 +
14929 +#endif
14930 +#endif
14931 +
14932 + }
14933 +
14934 return retval;
14935 }
14936
14937 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
14938 }
14939 }
14940
14941 +#ifdef CONFIG_PAX_SEGMEXEC
14942 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14943 + error = -EINVAL;
14944 + goto out_unlock;
14945 + }
14946 +#endif
14947 +
14948 fill_ldt(&ldt, &ldt_info);
14949 if (oldmode)
14950 ldt.avl = 0;
14951 diff -urNp linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c
14952 --- linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
14953 +++ linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
14954 @@ -26,7 +26,7 @@
14955 #include <asm/system.h>
14956 #include <asm/cacheflush.h>
14957
14958 -static void set_idt(void *newidt, __u16 limit)
14959 +static void set_idt(struct desc_struct *newidt, __u16 limit)
14960 {
14961 struct desc_ptr curidt;
14962
14963 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
14964 }
14965
14966
14967 -static void set_gdt(void *newgdt, __u16 limit)
14968 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14969 {
14970 struct desc_ptr curgdt;
14971
14972 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14973 }
14974
14975 control_page = page_address(image->control_code_page);
14976 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14977 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14978
14979 relocate_kernel_ptr = control_page;
14980 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14981 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_amd.c linux-2.6.32.41/arch/x86/kernel/microcode_amd.c
14982 --- linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
14983 +++ linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
14984 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
14985 uci->mc = NULL;
14986 }
14987
14988 -static struct microcode_ops microcode_amd_ops = {
14989 +static const struct microcode_ops microcode_amd_ops = {
14990 .request_microcode_user = request_microcode_user,
14991 .request_microcode_fw = request_microcode_fw,
14992 .collect_cpu_info = collect_cpu_info_amd,
14993 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
14994 .microcode_fini_cpu = microcode_fini_cpu_amd,
14995 };
14996
14997 -struct microcode_ops * __init init_amd_microcode(void)
14998 +const struct microcode_ops * __init init_amd_microcode(void)
14999 {
15000 return &microcode_amd_ops;
15001 }
15002 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_core.c linux-2.6.32.41/arch/x86/kernel/microcode_core.c
15003 --- linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15004 +++ linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15005 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15006
15007 #define MICROCODE_VERSION "2.00"
15008
15009 -static struct microcode_ops *microcode_ops;
15010 +static const struct microcode_ops *microcode_ops;
15011
15012 /*
15013 * Synchronization.
15014 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_intel.c linux-2.6.32.41/arch/x86/kernel/microcode_intel.c
15015 --- linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15016 +++ linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15017 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15018
15019 static int get_ucode_user(void *to, const void *from, size_t n)
15020 {
15021 - return copy_from_user(to, from, n);
15022 + return copy_from_user(to, (__force const void __user *)from, n);
15023 }
15024
15025 static enum ucode_state
15026 request_microcode_user(int cpu, const void __user *buf, size_t size)
15027 {
15028 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15029 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15030 }
15031
15032 static void microcode_fini_cpu(int cpu)
15033 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15034 uci->mc = NULL;
15035 }
15036
15037 -static struct microcode_ops microcode_intel_ops = {
15038 +static const struct microcode_ops microcode_intel_ops = {
15039 .request_microcode_user = request_microcode_user,
15040 .request_microcode_fw = request_microcode_fw,
15041 .collect_cpu_info = collect_cpu_info,
15042 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15043 .microcode_fini_cpu = microcode_fini_cpu,
15044 };
15045
15046 -struct microcode_ops * __init init_intel_microcode(void)
15047 +const struct microcode_ops * __init init_intel_microcode(void)
15048 {
15049 return &microcode_intel_ops;
15050 }
15051 diff -urNp linux-2.6.32.41/arch/x86/kernel/module.c linux-2.6.32.41/arch/x86/kernel/module.c
15052 --- linux-2.6.32.41/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15053 +++ linux-2.6.32.41/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15054 @@ -34,7 +34,7 @@
15055 #define DEBUGP(fmt...)
15056 #endif
15057
15058 -void *module_alloc(unsigned long size)
15059 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15060 {
15061 struct vm_struct *area;
15062
15063 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15064 if (!area)
15065 return NULL;
15066
15067 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15068 - PAGE_KERNEL_EXEC);
15069 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15070 +}
15071 +
15072 +void *module_alloc(unsigned long size)
15073 +{
15074 +
15075 +#ifdef CONFIG_PAX_KERNEXEC
15076 + return __module_alloc(size, PAGE_KERNEL);
15077 +#else
15078 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15079 +#endif
15080 +
15081 }
15082
15083 /* Free memory returned from module_alloc */
15084 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15085 vfree(module_region);
15086 }
15087
15088 +#ifdef CONFIG_PAX_KERNEXEC
15089 +#ifdef CONFIG_X86_32
15090 +void *module_alloc_exec(unsigned long size)
15091 +{
15092 + struct vm_struct *area;
15093 +
15094 + if (size == 0)
15095 + return NULL;
15096 +
15097 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15098 + return area ? area->addr : NULL;
15099 +}
15100 +EXPORT_SYMBOL(module_alloc_exec);
15101 +
15102 +void module_free_exec(struct module *mod, void *module_region)
15103 +{
15104 + vunmap(module_region);
15105 +}
15106 +EXPORT_SYMBOL(module_free_exec);
15107 +#else
15108 +void module_free_exec(struct module *mod, void *module_region)
15109 +{
15110 + module_free(mod, module_region);
15111 +}
15112 +EXPORT_SYMBOL(module_free_exec);
15113 +
15114 +void *module_alloc_exec(unsigned long size)
15115 +{
15116 + return __module_alloc(size, PAGE_KERNEL_RX);
15117 +}
15118 +EXPORT_SYMBOL(module_alloc_exec);
15119 +#endif
15120 +#endif
15121 +
15122 /* We don't need anything special. */
15123 int module_frob_arch_sections(Elf_Ehdr *hdr,
15124 Elf_Shdr *sechdrs,
15125 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15126 unsigned int i;
15127 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15128 Elf32_Sym *sym;
15129 - uint32_t *location;
15130 + uint32_t *plocation, location;
15131
15132 DEBUGP("Applying relocate section %u to %u\n", relsec,
15133 sechdrs[relsec].sh_info);
15134 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15135 /* This is where to make the change */
15136 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15137 - + rel[i].r_offset;
15138 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15139 + location = (uint32_t)plocation;
15140 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15141 + plocation = ktla_ktva((void *)plocation);
15142 /* This is the symbol it is referring to. Note that all
15143 undefined symbols have been resolved. */
15144 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15145 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15146 switch (ELF32_R_TYPE(rel[i].r_info)) {
15147 case R_386_32:
15148 /* We add the value into the location given */
15149 - *location += sym->st_value;
15150 + pax_open_kernel();
15151 + *plocation += sym->st_value;
15152 + pax_close_kernel();
15153 break;
15154 case R_386_PC32:
15155 /* Add the value, subtract its postition */
15156 - *location += sym->st_value - (uint32_t)location;
15157 + pax_open_kernel();
15158 + *plocation += sym->st_value - location;
15159 + pax_close_kernel();
15160 break;
15161 default:
15162 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15163 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15164 case R_X86_64_NONE:
15165 break;
15166 case R_X86_64_64:
15167 + pax_open_kernel();
15168 *(u64 *)loc = val;
15169 + pax_close_kernel();
15170 break;
15171 case R_X86_64_32:
15172 + pax_open_kernel();
15173 *(u32 *)loc = val;
15174 + pax_close_kernel();
15175 if (val != *(u32 *)loc)
15176 goto overflow;
15177 break;
15178 case R_X86_64_32S:
15179 + pax_open_kernel();
15180 *(s32 *)loc = val;
15181 + pax_close_kernel();
15182 if ((s64)val != *(s32 *)loc)
15183 goto overflow;
15184 break;
15185 case R_X86_64_PC32:
15186 val -= (u64)loc;
15187 + pax_open_kernel();
15188 *(u32 *)loc = val;
15189 + pax_close_kernel();
15190 +
15191 #if 0
15192 if ((s64)val != *(s32 *)loc)
15193 goto overflow;
15194 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt.c linux-2.6.32.41/arch/x86/kernel/paravirt.c
15195 --- linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15196 +++ linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15197 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15198 * corresponding structure. */
15199 static void *get_call_destination(u8 type)
15200 {
15201 - struct paravirt_patch_template tmpl = {
15202 + const struct paravirt_patch_template tmpl = {
15203 .pv_init_ops = pv_init_ops,
15204 .pv_time_ops = pv_time_ops,
15205 .pv_cpu_ops = pv_cpu_ops,
15206 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15207 .pv_lock_ops = pv_lock_ops,
15208 #endif
15209 };
15210 +
15211 + pax_track_stack();
15212 +
15213 return *((void **)&tmpl + type);
15214 }
15215
15216 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15217 if (opfunc == NULL)
15218 /* If there's no function, patch it with a ud2a (BUG) */
15219 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15220 - else if (opfunc == _paravirt_nop)
15221 + else if (opfunc == (void *)_paravirt_nop)
15222 /* If the operation is a nop, then nop the callsite */
15223 ret = paravirt_patch_nop();
15224
15225 /* identity functions just return their single argument */
15226 - else if (opfunc == _paravirt_ident_32)
15227 + else if (opfunc == (void *)_paravirt_ident_32)
15228 ret = paravirt_patch_ident_32(insnbuf, len);
15229 - else if (opfunc == _paravirt_ident_64)
15230 + else if (opfunc == (void *)_paravirt_ident_64)
15231 ret = paravirt_patch_ident_64(insnbuf, len);
15232
15233 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15234 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15235 if (insn_len > len || start == NULL)
15236 insn_len = len;
15237 else
15238 - memcpy(insnbuf, start, insn_len);
15239 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15240
15241 return insn_len;
15242 }
15243 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15244 preempt_enable();
15245 }
15246
15247 -struct pv_info pv_info = {
15248 +struct pv_info pv_info __read_only = {
15249 .name = "bare hardware",
15250 .paravirt_enabled = 0,
15251 .kernel_rpl = 0,
15252 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15253 };
15254
15255 -struct pv_init_ops pv_init_ops = {
15256 +struct pv_init_ops pv_init_ops __read_only = {
15257 .patch = native_patch,
15258 };
15259
15260 -struct pv_time_ops pv_time_ops = {
15261 +struct pv_time_ops pv_time_ops __read_only = {
15262 .sched_clock = native_sched_clock,
15263 };
15264
15265 -struct pv_irq_ops pv_irq_ops = {
15266 +struct pv_irq_ops pv_irq_ops __read_only = {
15267 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15268 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15269 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15270 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15271 #endif
15272 };
15273
15274 -struct pv_cpu_ops pv_cpu_ops = {
15275 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15276 .cpuid = native_cpuid,
15277 .get_debugreg = native_get_debugreg,
15278 .set_debugreg = native_set_debugreg,
15279 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15280 .end_context_switch = paravirt_nop,
15281 };
15282
15283 -struct pv_apic_ops pv_apic_ops = {
15284 +struct pv_apic_ops pv_apic_ops __read_only = {
15285 #ifdef CONFIG_X86_LOCAL_APIC
15286 .startup_ipi_hook = paravirt_nop,
15287 #endif
15288 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15289 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15290 #endif
15291
15292 -struct pv_mmu_ops pv_mmu_ops = {
15293 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15294
15295 .read_cr2 = native_read_cr2,
15296 .write_cr2 = native_write_cr2,
15297 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15298 },
15299
15300 .set_fixmap = native_set_fixmap,
15301 +
15302 +#ifdef CONFIG_PAX_KERNEXEC
15303 + .pax_open_kernel = native_pax_open_kernel,
15304 + .pax_close_kernel = native_pax_close_kernel,
15305 +#endif
15306 +
15307 };
15308
15309 EXPORT_SYMBOL_GPL(pv_time_ops);
15310 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c
15311 --- linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15312 +++ linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15313 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15314 __raw_spin_lock(lock);
15315 }
15316
15317 -struct pv_lock_ops pv_lock_ops = {
15318 +struct pv_lock_ops pv_lock_ops __read_only = {
15319 #ifdef CONFIG_SMP
15320 .spin_is_locked = __ticket_spin_is_locked,
15321 .spin_is_contended = __ticket_spin_is_contended,
15322 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c
15323 --- linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15324 +++ linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15325 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15326 free_pages((unsigned long)vaddr, get_order(size));
15327 }
15328
15329 -static struct dma_map_ops calgary_dma_ops = {
15330 +static const struct dma_map_ops calgary_dma_ops = {
15331 .alloc_coherent = calgary_alloc_coherent,
15332 .free_coherent = calgary_free_coherent,
15333 .map_sg = calgary_map_sg,
15334 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-dma.c linux-2.6.32.41/arch/x86/kernel/pci-dma.c
15335 --- linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15336 +++ linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15337 @@ -14,7 +14,7 @@
15338
15339 static int forbid_dac __read_mostly;
15340
15341 -struct dma_map_ops *dma_ops;
15342 +const struct dma_map_ops *dma_ops;
15343 EXPORT_SYMBOL(dma_ops);
15344
15345 static int iommu_sac_force __read_mostly;
15346 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15347
15348 int dma_supported(struct device *dev, u64 mask)
15349 {
15350 - struct dma_map_ops *ops = get_dma_ops(dev);
15351 + const struct dma_map_ops *ops = get_dma_ops(dev);
15352
15353 #ifdef CONFIG_PCI
15354 if (mask > 0xffffffff && forbid_dac > 0) {
15355 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c
15356 --- linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15357 +++ linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15358 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15359 return -1;
15360 }
15361
15362 -static struct dma_map_ops gart_dma_ops = {
15363 +static const struct dma_map_ops gart_dma_ops = {
15364 .map_sg = gart_map_sg,
15365 .unmap_sg = gart_unmap_sg,
15366 .map_page = gart_map_page,
15367 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-nommu.c linux-2.6.32.41/arch/x86/kernel/pci-nommu.c
15368 --- linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15369 +++ linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15370 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15371 flush_write_buffers();
15372 }
15373
15374 -struct dma_map_ops nommu_dma_ops = {
15375 +const struct dma_map_ops nommu_dma_ops = {
15376 .alloc_coherent = dma_generic_alloc_coherent,
15377 .free_coherent = nommu_free_coherent,
15378 .map_sg = nommu_map_sg,
15379 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c
15380 --- linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15381 +++ linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15382 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15383 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15384 }
15385
15386 -static struct dma_map_ops swiotlb_dma_ops = {
15387 +static const struct dma_map_ops swiotlb_dma_ops = {
15388 .mapping_error = swiotlb_dma_mapping_error,
15389 .alloc_coherent = x86_swiotlb_alloc_coherent,
15390 .free_coherent = swiotlb_free_coherent,
15391 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_32.c linux-2.6.32.41/arch/x86/kernel/process_32.c
15392 --- linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
15393 +++ linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-05-16 21:46:57.000000000 -0400
15394 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15395 unsigned long thread_saved_pc(struct task_struct *tsk)
15396 {
15397 return ((unsigned long *)tsk->thread.sp)[3];
15398 +//XXX return tsk->thread.eip;
15399 }
15400
15401 #ifndef CONFIG_SMP
15402 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15403 unsigned short ss, gs;
15404 const char *board;
15405
15406 - if (user_mode_vm(regs)) {
15407 + if (user_mode(regs)) {
15408 sp = regs->sp;
15409 ss = regs->ss & 0xffff;
15410 - gs = get_user_gs(regs);
15411 } else {
15412 sp = (unsigned long) (&regs->sp);
15413 savesegment(ss, ss);
15414 - savesegment(gs, gs);
15415 }
15416 + gs = get_user_gs(regs);
15417
15418 printk("\n");
15419
15420 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15421 regs.bx = (unsigned long) fn;
15422 regs.dx = (unsigned long) arg;
15423
15424 - regs.ds = __USER_DS;
15425 - regs.es = __USER_DS;
15426 + regs.ds = __KERNEL_DS;
15427 + regs.es = __KERNEL_DS;
15428 regs.fs = __KERNEL_PERCPU;
15429 - regs.gs = __KERNEL_STACK_CANARY;
15430 + savesegment(gs, regs.gs);
15431 regs.orig_ax = -1;
15432 regs.ip = (unsigned long) kernel_thread_helper;
15433 regs.cs = __KERNEL_CS | get_kernel_rpl();
15434 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15435 struct task_struct *tsk;
15436 int err;
15437
15438 - childregs = task_pt_regs(p);
15439 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15440 *childregs = *regs;
15441 childregs->ax = 0;
15442 childregs->sp = sp;
15443
15444 p->thread.sp = (unsigned long) childregs;
15445 p->thread.sp0 = (unsigned long) (childregs+1);
15446 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15447
15448 p->thread.ip = (unsigned long) ret_from_fork;
15449
15450 @@ -346,7 +347,7 @@ __switch_to(struct task_struct *prev_p,
15451 struct thread_struct *prev = &prev_p->thread,
15452 *next = &next_p->thread;
15453 int cpu = smp_processor_id();
15454 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15455 + struct tss_struct *tss = init_tss + cpu;
15456 bool preload_fpu;
15457
15458 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15459 @@ -381,6 +382,10 @@ __switch_to(struct task_struct *prev_p,
15460 */
15461 lazy_save_gs(prev->gs);
15462
15463 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15464 + __set_fs(task_thread_info(next_p)->addr_limit);
15465 +#endif
15466 +
15467 /*
15468 * Load the per-thread Thread-Local Storage descriptor.
15469 */
15470 @@ -416,6 +421,9 @@ __switch_to(struct task_struct *prev_p,
15471 */
15472 arch_end_context_switch(next_p);
15473
15474 + percpu_write(current_task, next_p);
15475 + percpu_write(current_tinfo, &next_p->tinfo);
15476 +
15477 if (preload_fpu)
15478 __math_state_restore();
15479
15480 @@ -425,8 +433,6 @@ __switch_to(struct task_struct *prev_p,
15481 if (prev->gs | next->gs)
15482 lazy_load_gs(next->gs);
15483
15484 - percpu_write(current_task, next_p);
15485 -
15486 return prev_p;
15487 }
15488
15489 @@ -496,4 +502,3 @@ unsigned long get_wchan(struct task_stru
15490 } while (count++ < 16);
15491 return 0;
15492 }
15493 -
15494 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_64.c linux-2.6.32.41/arch/x86/kernel/process_64.c
15495 --- linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
15496 +++ linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-05-16 21:46:57.000000000 -0400
15497 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15498 void exit_idle(void)
15499 {
15500 /* idle loop has pid 0 */
15501 - if (current->pid)
15502 + if (task_pid_nr(current))
15503 return;
15504 __exit_idle();
15505 }
15506 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15507 if (!board)
15508 board = "";
15509 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15510 - current->pid, current->comm, print_tainted(),
15511 + task_pid_nr(current), current->comm, print_tainted(),
15512 init_utsname()->release,
15513 (int)strcspn(init_utsname()->version, " "),
15514 init_utsname()->version, board);
15515 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15516 struct pt_regs *childregs;
15517 struct task_struct *me = current;
15518
15519 - childregs = ((struct pt_regs *)
15520 - (THREAD_SIZE + task_stack_page(p))) - 1;
15521 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15522 *childregs = *regs;
15523
15524 childregs->ax = 0;
15525 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15526 p->thread.sp = (unsigned long) childregs;
15527 p->thread.sp0 = (unsigned long) (childregs+1);
15528 p->thread.usersp = me->thread.usersp;
15529 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15530
15531 set_tsk_thread_flag(p, TIF_FORK);
15532
15533 @@ -380,7 +380,7 @@ __switch_to(struct task_struct *prev_p,
15534 struct thread_struct *prev = &prev_p->thread;
15535 struct thread_struct *next = &next_p->thread;
15536 int cpu = smp_processor_id();
15537 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15538 + struct tss_struct *tss = init_tss + cpu;
15539 unsigned fsindex, gsindex;
15540 bool preload_fpu;
15541
15542 @@ -476,10 +476,9 @@ __switch_to(struct task_struct *prev_p,
15543 prev->usersp = percpu_read(old_rsp);
15544 percpu_write(old_rsp, next->usersp);
15545 percpu_write(current_task, next_p);
15546 + percpu_write(current_tinfo, &next_p->tinfo);
15547
15548 - percpu_write(kernel_stack,
15549 - (unsigned long)task_stack_page(next_p) +
15550 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15551 + percpu_write(kernel_stack, next->sp0);
15552
15553 /*
15554 * Now maybe reload the debug registers and handle I/O bitmaps
15555 @@ -560,12 +559,11 @@ unsigned long get_wchan(struct task_stru
15556 if (!p || p == current || p->state == TASK_RUNNING)
15557 return 0;
15558 stack = (unsigned long)task_stack_page(p);
15559 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15560 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15561 return 0;
15562 fp = *(u64 *)(p->thread.sp);
15563 do {
15564 - if (fp < (unsigned long)stack ||
15565 - fp >= (unsigned long)stack+THREAD_SIZE)
15566 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15567 return 0;
15568 ip = *(u64 *)(fp+8);
15569 if (!in_sched_functions(ip))
15570 diff -urNp linux-2.6.32.41/arch/x86/kernel/process.c linux-2.6.32.41/arch/x86/kernel/process.c
15571 --- linux-2.6.32.41/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15572 +++ linux-2.6.32.41/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15573 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15574
15575 void free_thread_info(struct thread_info *ti)
15576 {
15577 - free_thread_xstate(ti->task);
15578 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15579 }
15580
15581 +static struct kmem_cache *task_struct_cachep;
15582 +
15583 void arch_task_cache_init(void)
15584 {
15585 - task_xstate_cachep =
15586 - kmem_cache_create("task_xstate", xstate_size,
15587 + /* create a slab on which task_structs can be allocated */
15588 + task_struct_cachep =
15589 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15590 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15591 +
15592 + task_xstate_cachep =
15593 + kmem_cache_create("task_xstate", xstate_size,
15594 __alignof__(union thread_xstate),
15595 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15596 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15597 +}
15598 +
15599 +struct task_struct *alloc_task_struct(void)
15600 +{
15601 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15602 +}
15603 +
15604 +void free_task_struct(struct task_struct *task)
15605 +{
15606 + free_thread_xstate(task);
15607 + kmem_cache_free(task_struct_cachep, task);
15608 }
15609
15610 /*
15611 @@ -73,7 +90,7 @@ void exit_thread(void)
15612 unsigned long *bp = t->io_bitmap_ptr;
15613
15614 if (bp) {
15615 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15616 + struct tss_struct *tss = init_tss + get_cpu();
15617
15618 t->io_bitmap_ptr = NULL;
15619 clear_thread_flag(TIF_IO_BITMAP);
15620 @@ -93,6 +110,9 @@ void flush_thread(void)
15621
15622 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15623
15624 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15625 + loadsegment(gs, 0);
15626 +#endif
15627 tsk->thread.debugreg0 = 0;
15628 tsk->thread.debugreg1 = 0;
15629 tsk->thread.debugreg2 = 0;
15630 @@ -307,7 +327,7 @@ void default_idle(void)
15631 EXPORT_SYMBOL(default_idle);
15632 #endif
15633
15634 -void stop_this_cpu(void *dummy)
15635 +__noreturn void stop_this_cpu(void *dummy)
15636 {
15637 local_irq_disable();
15638 /*
15639 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15640 }
15641 early_param("idle", idle_setup);
15642
15643 -unsigned long arch_align_stack(unsigned long sp)
15644 +#ifdef CONFIG_PAX_RANDKSTACK
15645 +asmlinkage void pax_randomize_kstack(void)
15646 {
15647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15648 - sp -= get_random_int() % 8192;
15649 - return sp & ~0xf;
15650 -}
15651 + struct thread_struct *thread = &current->thread;
15652 + unsigned long time;
15653
15654 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15655 -{
15656 - unsigned long range_end = mm->brk + 0x02000000;
15657 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15658 + if (!randomize_va_space)
15659 + return;
15660 +
15661 + rdtscl(time);
15662 +
15663 + /* P4 seems to return a 0 LSB, ignore it */
15664 +#ifdef CONFIG_MPENTIUM4
15665 + time &= 0x3EUL;
15666 + time <<= 2;
15667 +#elif defined(CONFIG_X86_64)
15668 + time &= 0xFUL;
15669 + time <<= 4;
15670 +#else
15671 + time &= 0x1FUL;
15672 + time <<= 3;
15673 +#endif
15674 +
15675 + thread->sp0 ^= time;
15676 + load_sp0(init_tss + smp_processor_id(), thread);
15677 +
15678 +#ifdef CONFIG_X86_64
15679 + percpu_write(kernel_stack, thread->sp0);
15680 +#endif
15681 }
15682 +#endif
15683
15684 diff -urNp linux-2.6.32.41/arch/x86/kernel/ptrace.c linux-2.6.32.41/arch/x86/kernel/ptrace.c
15685 --- linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15686 +++ linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15687 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15688 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15689 {
15690 int ret;
15691 - unsigned long __user *datap = (unsigned long __user *)data;
15692 + unsigned long __user *datap = (__force unsigned long __user *)data;
15693
15694 switch (request) {
15695 /* read the word at location addr in the USER area. */
15696 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15697 if (addr < 0)
15698 return -EIO;
15699 ret = do_get_thread_area(child, addr,
15700 - (struct user_desc __user *) data);
15701 + (__force struct user_desc __user *) data);
15702 break;
15703
15704 case PTRACE_SET_THREAD_AREA:
15705 if (addr < 0)
15706 return -EIO;
15707 ret = do_set_thread_area(child, addr,
15708 - (struct user_desc __user *) data, 0);
15709 + (__force struct user_desc __user *) data, 0);
15710 break;
15711 #endif
15712
15713 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15714 #ifdef CONFIG_X86_PTRACE_BTS
15715 case PTRACE_BTS_CONFIG:
15716 ret = ptrace_bts_config
15717 - (child, data, (struct ptrace_bts_config __user *)addr);
15718 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15719 break;
15720
15721 case PTRACE_BTS_STATUS:
15722 ret = ptrace_bts_status
15723 - (child, data, (struct ptrace_bts_config __user *)addr);
15724 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15725 break;
15726
15727 case PTRACE_BTS_SIZE:
15728 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15729
15730 case PTRACE_BTS_GET:
15731 ret = ptrace_bts_read_record
15732 - (child, data, (struct bts_struct __user *) addr);
15733 + (child, data, (__force struct bts_struct __user *) addr);
15734 break;
15735
15736 case PTRACE_BTS_CLEAR:
15737 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15738
15739 case PTRACE_BTS_DRAIN:
15740 ret = ptrace_bts_drain
15741 - (child, data, (struct bts_struct __user *) addr);
15742 + (child, data, (__force struct bts_struct __user *) addr);
15743 break;
15744 #endif /* CONFIG_X86_PTRACE_BTS */
15745
15746 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15747 info.si_code = si_code;
15748
15749 /* User-mode ip? */
15750 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15751 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15752
15753 /* Send us the fake SIGTRAP */
15754 force_sig_info(SIGTRAP, &info, tsk);
15755 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15756 * We must return the syscall number to actually look up in the table.
15757 * This can be -1L to skip running any syscall at all.
15758 */
15759 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15760 +long syscall_trace_enter(struct pt_regs *regs)
15761 {
15762 long ret = 0;
15763
15764 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15765 return ret ?: regs->orig_ax;
15766 }
15767
15768 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
15769 +void syscall_trace_leave(struct pt_regs *regs)
15770 {
15771 if (unlikely(current->audit_context))
15772 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15773 diff -urNp linux-2.6.32.41/arch/x86/kernel/reboot.c linux-2.6.32.41/arch/x86/kernel/reboot.c
15774 --- linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15775 +++ linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15776 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15777 EXPORT_SYMBOL(pm_power_off);
15778
15779 static const struct desc_ptr no_idt = {};
15780 -static int reboot_mode;
15781 +static unsigned short reboot_mode;
15782 enum reboot_type reboot_type = BOOT_KBD;
15783 int reboot_force;
15784
15785 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
15786 controller to pulse the CPU reset line, which is more thorough, but
15787 doesn't work with at least one type of 486 motherboard. It is easy
15788 to stop this code working; hence the copious comments. */
15789 -static const unsigned long long
15790 -real_mode_gdt_entries [3] =
15791 +static struct desc_struct
15792 +real_mode_gdt_entries [3] __read_only =
15793 {
15794 - 0x0000000000000000ULL, /* Null descriptor */
15795 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15796 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15797 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15798 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15799 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15800 };
15801
15802 static const struct desc_ptr
15803 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15804 * specified by the code and length parameters.
15805 * We assume that length will aways be less that 100!
15806 */
15807 -void machine_real_restart(const unsigned char *code, int length)
15808 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15809 {
15810 local_irq_disable();
15811
15812 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15813 /* Remap the kernel at virtual address zero, as well as offset zero
15814 from the kernel segment. This assumes the kernel segment starts at
15815 virtual address PAGE_OFFSET. */
15816 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15817 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15818 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15819 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15820
15821 /*
15822 * Use `swapper_pg_dir' as our page directory.
15823 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15824 boot)". This seems like a fairly standard thing that gets set by
15825 REBOOT.COM programs, and the previous reset routine did this
15826 too. */
15827 - *((unsigned short *)0x472) = reboot_mode;
15828 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15829
15830 /* For the switch to real mode, copy some code to low memory. It has
15831 to be in the first 64k because it is running in 16-bit mode, and it
15832 has to have the same physical and virtual address, because it turns
15833 off paging. Copy it near the end of the first page, out of the way
15834 of BIOS variables. */
15835 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15836 - real_mode_switch, sizeof (real_mode_switch));
15837 - memcpy((void *)(0x1000 - 100), code, length);
15838 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15839 + memcpy(__va(0x1000 - 100), code, length);
15840
15841 /* Set up the IDT for real mode. */
15842 load_idt(&real_mode_idt);
15843 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15844 __asm__ __volatile__ ("ljmp $0x0008,%0"
15845 :
15846 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15847 + do { } while (1);
15848 }
15849 #ifdef CONFIG_APM_MODULE
15850 EXPORT_SYMBOL(machine_real_restart);
15851 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15852 {
15853 }
15854
15855 -static void native_machine_emergency_restart(void)
15856 +__noreturn static void native_machine_emergency_restart(void)
15857 {
15858 int i;
15859
15860 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15861 #endif
15862 }
15863
15864 -static void __machine_emergency_restart(int emergency)
15865 +static __noreturn void __machine_emergency_restart(int emergency)
15866 {
15867 reboot_emergency = emergency;
15868 machine_ops.emergency_restart();
15869 }
15870
15871 -static void native_machine_restart(char *__unused)
15872 +static __noreturn void native_machine_restart(char *__unused)
15873 {
15874 printk("machine restart\n");
15875
15876 @@ -666,7 +666,7 @@ static void native_machine_restart(char
15877 __machine_emergency_restart(0);
15878 }
15879
15880 -static void native_machine_halt(void)
15881 +static __noreturn void native_machine_halt(void)
15882 {
15883 /* stop other cpus and apics */
15884 machine_shutdown();
15885 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
15886 stop_this_cpu(NULL);
15887 }
15888
15889 -static void native_machine_power_off(void)
15890 +__noreturn static void native_machine_power_off(void)
15891 {
15892 if (pm_power_off) {
15893 if (!reboot_force)
15894 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15895 }
15896 /* a fallback in case there is no PM info available */
15897 tboot_shutdown(TB_SHUTDOWN_HALT);
15898 + do { } while (1);
15899 }
15900
15901 struct machine_ops machine_ops = {
15902 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup.c linux-2.6.32.41/arch/x86/kernel/setup.c
15903 --- linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15904 +++ linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15905 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15906
15907 if (!boot_params.hdr.root_flags)
15908 root_mountflags &= ~MS_RDONLY;
15909 - init_mm.start_code = (unsigned long) _text;
15910 - init_mm.end_code = (unsigned long) _etext;
15911 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15912 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15913 init_mm.end_data = (unsigned long) _edata;
15914 init_mm.brk = _brk_end;
15915
15916 - code_resource.start = virt_to_phys(_text);
15917 - code_resource.end = virt_to_phys(_etext)-1;
15918 - data_resource.start = virt_to_phys(_etext);
15919 + code_resource.start = virt_to_phys(ktla_ktva(_text));
15920 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15921 + data_resource.start = virt_to_phys(_sdata);
15922 data_resource.end = virt_to_phys(_edata)-1;
15923 bss_resource.start = virt_to_phys(&__bss_start);
15924 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15925 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup_percpu.c linux-2.6.32.41/arch/x86/kernel/setup_percpu.c
15926 --- linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
15927 +++ linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
15928 @@ -25,19 +25,17 @@
15929 # define DBG(x...)
15930 #endif
15931
15932 -DEFINE_PER_CPU(int, cpu_number);
15933 +#ifdef CONFIG_SMP
15934 +DEFINE_PER_CPU(unsigned int, cpu_number);
15935 EXPORT_PER_CPU_SYMBOL(cpu_number);
15936 +#endif
15937
15938 -#ifdef CONFIG_X86_64
15939 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15940 -#else
15941 -#define BOOT_PERCPU_OFFSET 0
15942 -#endif
15943
15944 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15945 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15946
15947 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15948 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15949 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15950 };
15951 EXPORT_SYMBOL(__per_cpu_offset);
15952 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
15953 {
15954 #ifdef CONFIG_X86_32
15955 struct desc_struct gdt;
15956 + unsigned long base = per_cpu_offset(cpu);
15957
15958 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15959 - 0x2 | DESCTYPE_S, 0x8);
15960 - gdt.s = 1;
15961 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15962 + 0x83 | DESCTYPE_S, 0xC);
15963 write_gdt_entry(get_cpu_gdt_table(cpu),
15964 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15965 #endif
15966 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
15967 /* alrighty, percpu areas up and running */
15968 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15969 for_each_possible_cpu(cpu) {
15970 +#ifdef CONFIG_CC_STACKPROTECTOR
15971 +#ifdef CONFIG_X86_32
15972 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
15973 +#endif
15974 +#endif
15975 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15976 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15977 per_cpu(cpu_number, cpu) = cpu;
15978 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
15979 early_per_cpu_map(x86_cpu_to_node_map, cpu);
15980 #endif
15981 #endif
15982 +#ifdef CONFIG_CC_STACKPROTECTOR
15983 +#ifdef CONFIG_X86_32
15984 + if (!cpu)
15985 + per_cpu(stack_canary.canary, cpu) = canary;
15986 +#endif
15987 +#endif
15988 /*
15989 * Up to this point, the boot CPU has been using .data.init
15990 * area. Reload any changed state for the boot CPU.
15991 diff -urNp linux-2.6.32.41/arch/x86/kernel/signal.c linux-2.6.32.41/arch/x86/kernel/signal.c
15992 --- linux-2.6.32.41/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
15993 +++ linux-2.6.32.41/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
15994 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
15995 * Align the stack pointer according to the i386 ABI,
15996 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15997 */
15998 - sp = ((sp + 4) & -16ul) - 4;
15999 + sp = ((sp - 12) & -16ul) - 4;
16000 #else /* !CONFIG_X86_32 */
16001 sp = round_down(sp, 16) - 8;
16002 #endif
16003 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16004 * Return an always-bogus address instead so we will die with SIGSEGV.
16005 */
16006 if (onsigstack && !likely(on_sig_stack(sp)))
16007 - return (void __user *)-1L;
16008 + return (__force void __user *)-1L;
16009
16010 /* save i387 state */
16011 if (used_math() && save_i387_xstate(*fpstate) < 0)
16012 - return (void __user *)-1L;
16013 + return (__force void __user *)-1L;
16014
16015 return (void __user *)sp;
16016 }
16017 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16018 }
16019
16020 if (current->mm->context.vdso)
16021 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16022 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16023 else
16024 - restorer = &frame->retcode;
16025 + restorer = (void __user *)&frame->retcode;
16026 if (ka->sa.sa_flags & SA_RESTORER)
16027 restorer = ka->sa.sa_restorer;
16028
16029 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16030 * reasons and because gdb uses it as a signature to notice
16031 * signal handler stack frames.
16032 */
16033 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16034 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16035
16036 if (err)
16037 return -EFAULT;
16038 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16039 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16040
16041 /* Set up to return from userspace. */
16042 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16043 + if (current->mm->context.vdso)
16044 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16045 + else
16046 + restorer = (void __user *)&frame->retcode;
16047 if (ka->sa.sa_flags & SA_RESTORER)
16048 restorer = ka->sa.sa_restorer;
16049 put_user_ex(restorer, &frame->pretcode);
16050 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16051 * reasons and because gdb uses it as a signature to notice
16052 * signal handler stack frames.
16053 */
16054 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16055 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16056 } put_user_catch(err);
16057
16058 if (err)
16059 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16060 int signr;
16061 sigset_t *oldset;
16062
16063 + pax_track_stack();
16064 +
16065 /*
16066 * We want the common case to go fast, which is why we may in certain
16067 * cases get here from kernel mode. Just return without doing anything
16068 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16069 * X86_32: vm86 regs switched out by assembly code before reaching
16070 * here, so testing against kernel CS suffices.
16071 */
16072 - if (!user_mode(regs))
16073 + if (!user_mode_novm(regs))
16074 return;
16075
16076 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16077 diff -urNp linux-2.6.32.41/arch/x86/kernel/smpboot.c linux-2.6.32.41/arch/x86/kernel/smpboot.c
16078 --- linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16079 +++ linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16080 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16081 */
16082 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16083
16084 -void cpu_hotplug_driver_lock()
16085 +void cpu_hotplug_driver_lock(void)
16086 {
16087 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16088 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16089 }
16090
16091 -void cpu_hotplug_driver_unlock()
16092 +void cpu_hotplug_driver_unlock(void)
16093 {
16094 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16095 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16096 }
16097
16098 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16099 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16100 set_idle_for_cpu(cpu, c_idle.idle);
16101 do_rest:
16102 per_cpu(current_task, cpu) = c_idle.idle;
16103 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16104 #ifdef CONFIG_X86_32
16105 /* Stack for startup_32 can be just as for start_secondary onwards */
16106 irq_ctx_init(cpu);
16107 @@ -750,11 +751,13 @@ do_rest:
16108 #else
16109 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16110 initial_gs = per_cpu_offset(cpu);
16111 - per_cpu(kernel_stack, cpu) =
16112 - (unsigned long)task_stack_page(c_idle.idle) -
16113 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16114 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16115 #endif
16116 +
16117 + pax_open_kernel();
16118 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16119 + pax_close_kernel();
16120 +
16121 initial_code = (unsigned long)start_secondary;
16122 stack_start.sp = (void *) c_idle.idle->thread.sp;
16123
16124 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16125
16126 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16127
16128 +#ifdef CONFIG_PAX_PER_CPU_PGD
16129 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16130 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16131 + KERNEL_PGD_PTRS);
16132 +#endif
16133 +
16134 err = do_boot_cpu(apicid, cpu);
16135
16136 if (err) {
16137 diff -urNp linux-2.6.32.41/arch/x86/kernel/step.c linux-2.6.32.41/arch/x86/kernel/step.c
16138 --- linux-2.6.32.41/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16139 +++ linux-2.6.32.41/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16140 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16141 struct desc_struct *desc;
16142 unsigned long base;
16143
16144 - seg &= ~7UL;
16145 + seg >>= 3;
16146
16147 mutex_lock(&child->mm->context.lock);
16148 - if (unlikely((seg >> 3) >= child->mm->context.size))
16149 + if (unlikely(seg >= child->mm->context.size))
16150 addr = -1L; /* bogus selector, access would fault */
16151 else {
16152 desc = child->mm->context.ldt + seg;
16153 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16154 addr += base;
16155 }
16156 mutex_unlock(&child->mm->context.lock);
16157 - }
16158 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16159 + addr = ktla_ktva(addr);
16160
16161 return addr;
16162 }
16163 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16164 unsigned char opcode[15];
16165 unsigned long addr = convert_ip_to_linear(child, regs);
16166
16167 + if (addr == -EINVAL)
16168 + return 0;
16169 +
16170 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16171 for (i = 0; i < copied; i++) {
16172 switch (opcode[i]) {
16173 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16174
16175 #ifdef CONFIG_X86_64
16176 case 0x40 ... 0x4f:
16177 - if (regs->cs != __USER_CS)
16178 + if ((regs->cs & 0xffff) != __USER_CS)
16179 /* 32-bit mode: register increment */
16180 return 0;
16181 /* 64-bit mode: REX prefix */
16182 diff -urNp linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S
16183 --- linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16184 +++ linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16185 @@ -1,3 +1,4 @@
16186 +.section .rodata,"a",@progbits
16187 ENTRY(sys_call_table)
16188 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16189 .long sys_exit
16190 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c
16191 --- linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16192 +++ linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16193 @@ -24,6 +24,21 @@
16194
16195 #include <asm/syscalls.h>
16196
16197 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16198 +{
16199 + unsigned long pax_task_size = TASK_SIZE;
16200 +
16201 +#ifdef CONFIG_PAX_SEGMEXEC
16202 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16203 + pax_task_size = SEGMEXEC_TASK_SIZE;
16204 +#endif
16205 +
16206 + if (len > pax_task_size || addr > pax_task_size - len)
16207 + return -EINVAL;
16208 +
16209 + return 0;
16210 +}
16211 +
16212 /*
16213 * Perform the select(nd, in, out, ex, tv) and mmap() system
16214 * calls. Linux/i386 didn't use to be able to handle more than
16215 @@ -58,6 +73,212 @@ out:
16216 return err;
16217 }
16218
16219 +unsigned long
16220 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16221 + unsigned long len, unsigned long pgoff, unsigned long flags)
16222 +{
16223 + struct mm_struct *mm = current->mm;
16224 + struct vm_area_struct *vma;
16225 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16226 +
16227 +#ifdef CONFIG_PAX_SEGMEXEC
16228 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16229 + pax_task_size = SEGMEXEC_TASK_SIZE;
16230 +#endif
16231 +
16232 + pax_task_size -= PAGE_SIZE;
16233 +
16234 + if (len > pax_task_size)
16235 + return -ENOMEM;
16236 +
16237 + if (flags & MAP_FIXED)
16238 + return addr;
16239 +
16240 +#ifdef CONFIG_PAX_RANDMMAP
16241 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16242 +#endif
16243 +
16244 + if (addr) {
16245 + addr = PAGE_ALIGN(addr);
16246 + if (pax_task_size - len >= addr) {
16247 + vma = find_vma(mm, addr);
16248 + if (check_heap_stack_gap(vma, addr, len))
16249 + return addr;
16250 + }
16251 + }
16252 + if (len > mm->cached_hole_size) {
16253 + start_addr = addr = mm->free_area_cache;
16254 + } else {
16255 + start_addr = addr = mm->mmap_base;
16256 + mm->cached_hole_size = 0;
16257 + }
16258 +
16259 +#ifdef CONFIG_PAX_PAGEEXEC
16260 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16261 + start_addr = 0x00110000UL;
16262 +
16263 +#ifdef CONFIG_PAX_RANDMMAP
16264 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16265 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16266 +#endif
16267 +
16268 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16269 + start_addr = addr = mm->mmap_base;
16270 + else
16271 + addr = start_addr;
16272 + }
16273 +#endif
16274 +
16275 +full_search:
16276 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16277 + /* At this point: (!vma || addr < vma->vm_end). */
16278 + if (pax_task_size - len < addr) {
16279 + /*
16280 + * Start a new search - just in case we missed
16281 + * some holes.
16282 + */
16283 + if (start_addr != mm->mmap_base) {
16284 + start_addr = addr = mm->mmap_base;
16285 + mm->cached_hole_size = 0;
16286 + goto full_search;
16287 + }
16288 + return -ENOMEM;
16289 + }
16290 + if (check_heap_stack_gap(vma, addr, len))
16291 + break;
16292 + if (addr + mm->cached_hole_size < vma->vm_start)
16293 + mm->cached_hole_size = vma->vm_start - addr;
16294 + addr = vma->vm_end;
16295 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16296 + start_addr = addr = mm->mmap_base;
16297 + mm->cached_hole_size = 0;
16298 + goto full_search;
16299 + }
16300 + }
16301 +
16302 + /*
16303 + * Remember the place where we stopped the search:
16304 + */
16305 + mm->free_area_cache = addr + len;
16306 + return addr;
16307 +}
16308 +
16309 +unsigned long
16310 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16311 + const unsigned long len, const unsigned long pgoff,
16312 + const unsigned long flags)
16313 +{
16314 + struct vm_area_struct *vma;
16315 + struct mm_struct *mm = current->mm;
16316 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16317 +
16318 +#ifdef CONFIG_PAX_SEGMEXEC
16319 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16320 + pax_task_size = SEGMEXEC_TASK_SIZE;
16321 +#endif
16322 +
16323 + pax_task_size -= PAGE_SIZE;
16324 +
16325 + /* requested length too big for entire address space */
16326 + if (len > pax_task_size)
16327 + return -ENOMEM;
16328 +
16329 + if (flags & MAP_FIXED)
16330 + return addr;
16331 +
16332 +#ifdef CONFIG_PAX_PAGEEXEC
16333 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16334 + goto bottomup;
16335 +#endif
16336 +
16337 +#ifdef CONFIG_PAX_RANDMMAP
16338 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16339 +#endif
16340 +
16341 + /* requesting a specific address */
16342 + if (addr) {
16343 + addr = PAGE_ALIGN(addr);
16344 + if (pax_task_size - len >= addr) {
16345 + vma = find_vma(mm, addr);
16346 + if (check_heap_stack_gap(vma, addr, len))
16347 + return addr;
16348 + }
16349 + }
16350 +
16351 + /* check if free_area_cache is useful for us */
16352 + if (len <= mm->cached_hole_size) {
16353 + mm->cached_hole_size = 0;
16354 + mm->free_area_cache = mm->mmap_base;
16355 + }
16356 +
16357 + /* either no address requested or can't fit in requested address hole */
16358 + addr = mm->free_area_cache;
16359 +
16360 + /* make sure it can fit in the remaining address space */
16361 + if (addr > len) {
16362 + vma = find_vma(mm, addr-len);
16363 + if (check_heap_stack_gap(vma, addr - len, len))
16364 + /* remember the address as a hint for next time */
16365 + return (mm->free_area_cache = addr-len);
16366 + }
16367 +
16368 + if (mm->mmap_base < len)
16369 + goto bottomup;
16370 +
16371 + addr = mm->mmap_base-len;
16372 +
16373 + do {
16374 + /*
16375 + * Lookup failure means no vma is above this address,
16376 + * else if new region fits below vma->vm_start,
16377 + * return with success:
16378 + */
16379 + vma = find_vma(mm, addr);
16380 + if (check_heap_stack_gap(vma, addr, len))
16381 + /* remember the address as a hint for next time */
16382 + return (mm->free_area_cache = addr);
16383 +
16384 + /* remember the largest hole we saw so far */
16385 + if (addr + mm->cached_hole_size < vma->vm_start)
16386 + mm->cached_hole_size = vma->vm_start - addr;
16387 +
16388 + /* try just below the current vma->vm_start */
16389 + addr = skip_heap_stack_gap(vma, len);
16390 + } while (!IS_ERR_VALUE(addr));
16391 +
16392 +bottomup:
16393 + /*
16394 + * A failed mmap() very likely causes application failure,
16395 + * so fall back to the bottom-up function here. This scenario
16396 + * can happen with large stack limits and large mmap()
16397 + * allocations.
16398 + */
16399 +
16400 +#ifdef CONFIG_PAX_SEGMEXEC
16401 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16402 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16403 + else
16404 +#endif
16405 +
16406 + mm->mmap_base = TASK_UNMAPPED_BASE;
16407 +
16408 +#ifdef CONFIG_PAX_RANDMMAP
16409 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16410 + mm->mmap_base += mm->delta_mmap;
16411 +#endif
16412 +
16413 + mm->free_area_cache = mm->mmap_base;
16414 + mm->cached_hole_size = ~0UL;
16415 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16416 + /*
16417 + * Restore the topdown base:
16418 + */
16419 + mm->mmap_base = base;
16420 + mm->free_area_cache = base;
16421 + mm->cached_hole_size = ~0UL;
16422 +
16423 + return addr;
16424 +}
16425
16426 struct sel_arg_struct {
16427 unsigned long n;
16428 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16429 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16430 case SEMTIMEDOP:
16431 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16432 - (const struct timespec __user *)fifth);
16433 + (__force const struct timespec __user *)fifth);
16434
16435 case SEMGET:
16436 return sys_semget(first, second, third);
16437 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16438 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16439 if (ret)
16440 return ret;
16441 - return put_user(raddr, (ulong __user *) third);
16442 + return put_user(raddr, (__force ulong __user *) third);
16443 }
16444 case 1: /* iBCS2 emulator entry point */
16445 if (!segment_eq(get_fs(), get_ds()))
16446 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16447
16448 return error;
16449 }
16450 -
16451 -
16452 -/*
16453 - * Do a system call from kernel instead of calling sys_execve so we
16454 - * end up with proper pt_regs.
16455 - */
16456 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16457 -{
16458 - long __res;
16459 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16460 - : "=a" (__res)
16461 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16462 - return __res;
16463 -}
16464 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c
16465 --- linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16466 +++ linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16467 @@ -32,8 +32,8 @@ out:
16468 return error;
16469 }
16470
16471 -static void find_start_end(unsigned long flags, unsigned long *begin,
16472 - unsigned long *end)
16473 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16474 + unsigned long *begin, unsigned long *end)
16475 {
16476 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16477 unsigned long new_begin;
16478 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16479 *begin = new_begin;
16480 }
16481 } else {
16482 - *begin = TASK_UNMAPPED_BASE;
16483 + *begin = mm->mmap_base;
16484 *end = TASK_SIZE;
16485 }
16486 }
16487 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16488 if (flags & MAP_FIXED)
16489 return addr;
16490
16491 - find_start_end(flags, &begin, &end);
16492 + find_start_end(mm, flags, &begin, &end);
16493
16494 if (len > end)
16495 return -ENOMEM;
16496
16497 +#ifdef CONFIG_PAX_RANDMMAP
16498 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16499 +#endif
16500 +
16501 if (addr) {
16502 addr = PAGE_ALIGN(addr);
16503 vma = find_vma(mm, addr);
16504 - if (end - len >= addr &&
16505 - (!vma || addr + len <= vma->vm_start))
16506 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16507 return addr;
16508 }
16509 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16510 @@ -106,7 +109,7 @@ full_search:
16511 }
16512 return -ENOMEM;
16513 }
16514 - if (!vma || addr + len <= vma->vm_start) {
16515 + if (check_heap_stack_gap(vma, addr, len)) {
16516 /*
16517 * Remember the place where we stopped the search:
16518 */
16519 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16520 {
16521 struct vm_area_struct *vma;
16522 struct mm_struct *mm = current->mm;
16523 - unsigned long addr = addr0;
16524 + unsigned long base = mm->mmap_base, addr = addr0;
16525
16526 /* requested length too big for entire address space */
16527 if (len > TASK_SIZE)
16528 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16529 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16530 goto bottomup;
16531
16532 +#ifdef CONFIG_PAX_RANDMMAP
16533 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16534 +#endif
16535 +
16536 /* requesting a specific address */
16537 if (addr) {
16538 addr = PAGE_ALIGN(addr);
16539 - vma = find_vma(mm, addr);
16540 - if (TASK_SIZE - len >= addr &&
16541 - (!vma || addr + len <= vma->vm_start))
16542 - return addr;
16543 + if (TASK_SIZE - len >= addr) {
16544 + vma = find_vma(mm, addr);
16545 + if (check_heap_stack_gap(vma, addr, len))
16546 + return addr;
16547 + }
16548 }
16549
16550 /* check if free_area_cache is useful for us */
16551 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16552 /* make sure it can fit in the remaining address space */
16553 if (addr > len) {
16554 vma = find_vma(mm, addr-len);
16555 - if (!vma || addr <= vma->vm_start)
16556 + if (check_heap_stack_gap(vma, addr - len, len))
16557 /* remember the address as a hint for next time */
16558 return mm->free_area_cache = addr-len;
16559 }
16560 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16561 * return with success:
16562 */
16563 vma = find_vma(mm, addr);
16564 - if (!vma || addr+len <= vma->vm_start)
16565 + if (check_heap_stack_gap(vma, addr, len))
16566 /* remember the address as a hint for next time */
16567 return mm->free_area_cache = addr;
16568
16569 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16570 mm->cached_hole_size = vma->vm_start - addr;
16571
16572 /* try just below the current vma->vm_start */
16573 - addr = vma->vm_start-len;
16574 - } while (len < vma->vm_start);
16575 + addr = skip_heap_stack_gap(vma, len);
16576 + } while (!IS_ERR_VALUE(addr));
16577
16578 bottomup:
16579 /*
16580 @@ -198,13 +206,21 @@ bottomup:
16581 * can happen with large stack limits and large mmap()
16582 * allocations.
16583 */
16584 + mm->mmap_base = TASK_UNMAPPED_BASE;
16585 +
16586 +#ifdef CONFIG_PAX_RANDMMAP
16587 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16588 + mm->mmap_base += mm->delta_mmap;
16589 +#endif
16590 +
16591 + mm->free_area_cache = mm->mmap_base;
16592 mm->cached_hole_size = ~0UL;
16593 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16594 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16595 /*
16596 * Restore the topdown base:
16597 */
16598 - mm->free_area_cache = mm->mmap_base;
16599 + mm->mmap_base = base;
16600 + mm->free_area_cache = base;
16601 mm->cached_hole_size = ~0UL;
16602
16603 return addr;
16604 diff -urNp linux-2.6.32.41/arch/x86/kernel/tboot.c linux-2.6.32.41/arch/x86/kernel/tboot.c
16605 --- linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16606 +++ linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16607 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16608
16609 void tboot_shutdown(u32 shutdown_type)
16610 {
16611 - void (*shutdown)(void);
16612 + void (* __noreturn shutdown)(void);
16613
16614 if (!tboot_enabled())
16615 return;
16616 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16617
16618 switch_to_tboot_pt();
16619
16620 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16621 + shutdown = (void *)tboot->shutdown_entry;
16622 shutdown();
16623
16624 /* should not reach here */
16625 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16626 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16627 }
16628
16629 -static atomic_t ap_wfs_count;
16630 +static atomic_unchecked_t ap_wfs_count;
16631
16632 static int tboot_wait_for_aps(int num_aps)
16633 {
16634 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16635 {
16636 switch (action) {
16637 case CPU_DYING:
16638 - atomic_inc(&ap_wfs_count);
16639 + atomic_inc_unchecked(&ap_wfs_count);
16640 if (num_online_cpus() == 1)
16641 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16642 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16643 return NOTIFY_BAD;
16644 break;
16645 }
16646 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16647
16648 tboot_create_trampoline();
16649
16650 - atomic_set(&ap_wfs_count, 0);
16651 + atomic_set_unchecked(&ap_wfs_count, 0);
16652 register_hotcpu_notifier(&tboot_cpu_notifier);
16653 return 0;
16654 }
16655 diff -urNp linux-2.6.32.41/arch/x86/kernel/time.c linux-2.6.32.41/arch/x86/kernel/time.c
16656 --- linux-2.6.32.41/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16657 +++ linux-2.6.32.41/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16658 @@ -26,17 +26,13 @@
16659 int timer_ack;
16660 #endif
16661
16662 -#ifdef CONFIG_X86_64
16663 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16664 -#endif
16665 -
16666 unsigned long profile_pc(struct pt_regs *regs)
16667 {
16668 unsigned long pc = instruction_pointer(regs);
16669
16670 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16671 + if (!user_mode(regs) && in_lock_functions(pc)) {
16672 #ifdef CONFIG_FRAME_POINTER
16673 - return *(unsigned long *)(regs->bp + sizeof(long));
16674 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16675 #else
16676 unsigned long *sp =
16677 (unsigned long *)kernel_stack_pointer(regs);
16678 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16679 * or above a saved flags. Eflags has bits 22-31 zero,
16680 * kernel addresses don't.
16681 */
16682 +
16683 +#ifdef CONFIG_PAX_KERNEXEC
16684 + return ktla_ktva(sp[0]);
16685 +#else
16686 if (sp[0] >> 22)
16687 return sp[0];
16688 if (sp[1] >> 22)
16689 return sp[1];
16690 #endif
16691 +
16692 +#endif
16693 }
16694 return pc;
16695 }
16696 diff -urNp linux-2.6.32.41/arch/x86/kernel/tls.c linux-2.6.32.41/arch/x86/kernel/tls.c
16697 --- linux-2.6.32.41/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16698 +++ linux-2.6.32.41/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16699 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16700 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16701 return -EINVAL;
16702
16703 +#ifdef CONFIG_PAX_SEGMEXEC
16704 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16705 + return -EINVAL;
16706 +#endif
16707 +
16708 set_tls_desc(p, idx, &info, 1);
16709
16710 return 0;
16711 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_32.S linux-2.6.32.41/arch/x86/kernel/trampoline_32.S
16712 --- linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16713 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16714 @@ -32,6 +32,12 @@
16715 #include <asm/segment.h>
16716 #include <asm/page_types.h>
16717
16718 +#ifdef CONFIG_PAX_KERNEXEC
16719 +#define ta(X) (X)
16720 +#else
16721 +#define ta(X) ((X) - __PAGE_OFFSET)
16722 +#endif
16723 +
16724 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16725 __CPUINITRODATA
16726 .code16
16727 @@ -60,7 +66,7 @@ r_base = .
16728 inc %ax # protected mode (PE) bit
16729 lmsw %ax # into protected mode
16730 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16731 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16732 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16733
16734 # These need to be in the same 64K segment as the above;
16735 # hence we don't use the boot_gdt_descr defined in head.S
16736 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_64.S linux-2.6.32.41/arch/x86/kernel/trampoline_64.S
16737 --- linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16738 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16739 @@ -91,7 +91,7 @@ startup_32:
16740 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16741 movl %eax, %ds
16742
16743 - movl $X86_CR4_PAE, %eax
16744 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16745 movl %eax, %cr4 # Enable PAE mode
16746
16747 # Setup trampoline 4 level pagetables
16748 @@ -138,7 +138,7 @@ tidt:
16749 # so the kernel can live anywhere
16750 .balign 4
16751 tgdt:
16752 - .short tgdt_end - tgdt # gdt limit
16753 + .short tgdt_end - tgdt - 1 # gdt limit
16754 .long tgdt - r_base
16755 .short 0
16756 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16757 diff -urNp linux-2.6.32.41/arch/x86/kernel/traps.c linux-2.6.32.41/arch/x86/kernel/traps.c
16758 --- linux-2.6.32.41/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16759 +++ linux-2.6.32.41/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16760 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16761
16762 /* Do we ignore FPU interrupts ? */
16763 char ignore_fpu_irq;
16764 -
16765 -/*
16766 - * The IDT has to be page-aligned to simplify the Pentium
16767 - * F0 0F bug workaround.
16768 - */
16769 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16770 #endif
16771
16772 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16773 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16774 static inline void
16775 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16776 {
16777 - if (!user_mode_vm(regs))
16778 + if (!user_mode(regs))
16779 die(str, regs, err);
16780 }
16781 #endif
16782
16783 static void __kprobes
16784 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16785 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16786 long error_code, siginfo_t *info)
16787 {
16788 struct task_struct *tsk = current;
16789
16790 #ifdef CONFIG_X86_32
16791 - if (regs->flags & X86_VM_MASK) {
16792 + if (v8086_mode(regs)) {
16793 /*
16794 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16795 * On nmi (interrupt 2), do_trap should not be called.
16796 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16797 }
16798 #endif
16799
16800 - if (!user_mode(regs))
16801 + if (!user_mode_novm(regs))
16802 goto kernel_trap;
16803
16804 #ifdef CONFIG_X86_32
16805 @@ -158,7 +152,7 @@ trap_signal:
16806 printk_ratelimit()) {
16807 printk(KERN_INFO
16808 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16809 - tsk->comm, tsk->pid, str,
16810 + tsk->comm, task_pid_nr(tsk), str,
16811 regs->ip, regs->sp, error_code);
16812 print_vma_addr(" in ", regs->ip);
16813 printk("\n");
16814 @@ -175,8 +169,20 @@ kernel_trap:
16815 if (!fixup_exception(regs)) {
16816 tsk->thread.error_code = error_code;
16817 tsk->thread.trap_no = trapnr;
16818 +
16819 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16820 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16821 + str = "PAX: suspicious stack segment fault";
16822 +#endif
16823 +
16824 die(str, regs, error_code);
16825 }
16826 +
16827 +#ifdef CONFIG_PAX_REFCOUNT
16828 + if (trapnr == 4)
16829 + pax_report_refcount_overflow(regs);
16830 +#endif
16831 +
16832 return;
16833
16834 #ifdef CONFIG_X86_32
16835 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16836 conditional_sti(regs);
16837
16838 #ifdef CONFIG_X86_32
16839 - if (regs->flags & X86_VM_MASK)
16840 + if (v8086_mode(regs))
16841 goto gp_in_vm86;
16842 #endif
16843
16844 tsk = current;
16845 - if (!user_mode(regs))
16846 + if (!user_mode_novm(regs))
16847 goto gp_in_kernel;
16848
16849 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16850 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16851 + struct mm_struct *mm = tsk->mm;
16852 + unsigned long limit;
16853 +
16854 + down_write(&mm->mmap_sem);
16855 + limit = mm->context.user_cs_limit;
16856 + if (limit < TASK_SIZE) {
16857 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16858 + up_write(&mm->mmap_sem);
16859 + return;
16860 + }
16861 + up_write(&mm->mmap_sem);
16862 + }
16863 +#endif
16864 +
16865 tsk->thread.error_code = error_code;
16866 tsk->thread.trap_no = 13;
16867
16868 @@ -305,6 +327,13 @@ gp_in_kernel:
16869 if (notify_die(DIE_GPF, "general protection fault", regs,
16870 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16871 return;
16872 +
16873 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16874 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16875 + die("PAX: suspicious general protection fault", regs, error_code);
16876 + else
16877 +#endif
16878 +
16879 die("general protection fault", regs, error_code);
16880 }
16881
16882 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16883 }
16884
16885 #ifdef CONFIG_X86_32
16886 - if (regs->flags & X86_VM_MASK)
16887 + if (v8086_mode(regs))
16888 goto debug_vm86;
16889 #endif
16890
16891 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16892 * kernel space (but re-enable TF when returning to user mode).
16893 */
16894 if (condition & DR_STEP) {
16895 - if (!user_mode(regs))
16896 + if (!user_mode_novm(regs))
16897 goto clear_TF_reenable;
16898 }
16899
16900 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16901 * Handle strange cache flush from user space exception
16902 * in all other cases. This is undocumented behaviour.
16903 */
16904 - if (regs->flags & X86_VM_MASK) {
16905 + if (v8086_mode(regs)) {
16906 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16907 return;
16908 }
16909 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16910 void __math_state_restore(void)
16911 {
16912 struct thread_info *thread = current_thread_info();
16913 - struct task_struct *tsk = thread->task;
16914 + struct task_struct *tsk = current;
16915
16916 /*
16917 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16918 @@ -825,8 +854,7 @@ void __math_state_restore(void)
16919 */
16920 asmlinkage void math_state_restore(void)
16921 {
16922 - struct thread_info *thread = current_thread_info();
16923 - struct task_struct *tsk = thread->task;
16924 + struct task_struct *tsk = current;
16925
16926 if (!tsk_used_math(tsk)) {
16927 local_irq_enable();
16928 diff -urNp linux-2.6.32.41/arch/x86/kernel/vm86_32.c linux-2.6.32.41/arch/x86/kernel/vm86_32.c
16929 --- linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
16930 +++ linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
16931 @@ -41,6 +41,7 @@
16932 #include <linux/ptrace.h>
16933 #include <linux/audit.h>
16934 #include <linux/stddef.h>
16935 +#include <linux/grsecurity.h>
16936
16937 #include <asm/uaccess.h>
16938 #include <asm/io.h>
16939 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16940 do_exit(SIGSEGV);
16941 }
16942
16943 - tss = &per_cpu(init_tss, get_cpu());
16944 + tss = init_tss + get_cpu();
16945 current->thread.sp0 = current->thread.saved_sp0;
16946 current->thread.sysenter_cs = __KERNEL_CS;
16947 load_sp0(tss, &current->thread);
16948 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
16949 struct task_struct *tsk;
16950 int tmp, ret = -EPERM;
16951
16952 +#ifdef CONFIG_GRKERNSEC_VM86
16953 + if (!capable(CAP_SYS_RAWIO)) {
16954 + gr_handle_vm86();
16955 + goto out;
16956 + }
16957 +#endif
16958 +
16959 tsk = current;
16960 if (tsk->thread.saved_sp0)
16961 goto out;
16962 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
16963 int tmp, ret;
16964 struct vm86plus_struct __user *v86;
16965
16966 +#ifdef CONFIG_GRKERNSEC_VM86
16967 + if (!capable(CAP_SYS_RAWIO)) {
16968 + gr_handle_vm86();
16969 + ret = -EPERM;
16970 + goto out;
16971 + }
16972 +#endif
16973 +
16974 tsk = current;
16975 switch (regs->bx) {
16976 case VM86_REQUEST_IRQ:
16977 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16978 tsk->thread.saved_fs = info->regs32->fs;
16979 tsk->thread.saved_gs = get_user_gs(info->regs32);
16980
16981 - tss = &per_cpu(init_tss, get_cpu());
16982 + tss = init_tss + get_cpu();
16983 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16984 if (cpu_has_sep)
16985 tsk->thread.sysenter_cs = 0;
16986 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16987 goto cannot_handle;
16988 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16989 goto cannot_handle;
16990 - intr_ptr = (unsigned long __user *) (i << 2);
16991 + intr_ptr = (__force unsigned long __user *) (i << 2);
16992 if (get_user(segoffs, intr_ptr))
16993 goto cannot_handle;
16994 if ((segoffs >> 16) == BIOSSEG)
16995 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmi_32.c linux-2.6.32.41/arch/x86/kernel/vmi_32.c
16996 --- linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
16997 +++ linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
16998 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
16999 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17000
17001 #define call_vrom_func(rom,func) \
17002 - (((VROMFUNC *)(rom->func))())
17003 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17004
17005 #define call_vrom_long_func(rom,func,arg) \
17006 - (((VROMLONGFUNC *)(rom->func)) (arg))
17007 +({\
17008 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17009 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17010 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17011 + __reloc;\
17012 +})
17013
17014 -static struct vrom_header *vmi_rom;
17015 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17016 static int disable_pge;
17017 static int disable_pse;
17018 static int disable_sep;
17019 @@ -76,10 +81,10 @@ static struct {
17020 void (*set_initial_ap_state)(int, int);
17021 void (*halt)(void);
17022 void (*set_lazy_mode)(int mode);
17023 -} vmi_ops;
17024 +} vmi_ops __read_only;
17025
17026 /* Cached VMI operations */
17027 -struct vmi_timer_ops vmi_timer_ops;
17028 +struct vmi_timer_ops vmi_timer_ops __read_only;
17029
17030 /*
17031 * VMI patching routines.
17032 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17033 static inline void patch_offset(void *insnbuf,
17034 unsigned long ip, unsigned long dest)
17035 {
17036 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17037 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17038 }
17039
17040 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17041 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17042 {
17043 u64 reloc;
17044 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17045 +
17046 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17047 switch(rel->type) {
17048 case VMI_RELOCATION_CALL_REL:
17049 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17050
17051 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17052 {
17053 - const pte_t pte = { .pte = 0 };
17054 + const pte_t pte = __pte(0ULL);
17055 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17056 }
17057
17058 static void vmi_pmd_clear(pmd_t *pmd)
17059 {
17060 - const pte_t pte = { .pte = 0 };
17061 + const pte_t pte = __pte(0ULL);
17062 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17063 }
17064 #endif
17065 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17066 ap.ss = __KERNEL_DS;
17067 ap.esp = (unsigned long) start_esp;
17068
17069 - ap.ds = __USER_DS;
17070 - ap.es = __USER_DS;
17071 + ap.ds = __KERNEL_DS;
17072 + ap.es = __KERNEL_DS;
17073 ap.fs = __KERNEL_PERCPU;
17074 - ap.gs = __KERNEL_STACK_CANARY;
17075 + savesegment(gs, ap.gs);
17076
17077 ap.eflags = 0;
17078
17079 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17080 paravirt_leave_lazy_mmu();
17081 }
17082
17083 +#ifdef CONFIG_PAX_KERNEXEC
17084 +static unsigned long vmi_pax_open_kernel(void)
17085 +{
17086 + return 0;
17087 +}
17088 +
17089 +static unsigned long vmi_pax_close_kernel(void)
17090 +{
17091 + return 0;
17092 +}
17093 +#endif
17094 +
17095 static inline int __init check_vmi_rom(struct vrom_header *rom)
17096 {
17097 struct pci_header *pci;
17098 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17099 return 0;
17100 if (rom->vrom_signature != VMI_SIGNATURE)
17101 return 0;
17102 + if (rom->rom_length * 512 > sizeof(*rom)) {
17103 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17104 + return 0;
17105 + }
17106 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17107 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17108 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17109 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17110 struct vrom_header *romstart;
17111 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17112 if (check_vmi_rom(romstart)) {
17113 - vmi_rom = romstart;
17114 + vmi_rom = *romstart;
17115 return 1;
17116 }
17117 }
17118 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17119
17120 para_fill(pv_irq_ops.safe_halt, Halt);
17121
17122 +#ifdef CONFIG_PAX_KERNEXEC
17123 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17124 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17125 +#endif
17126 +
17127 /*
17128 * Alternative instruction rewriting doesn't happen soon enough
17129 * to convert VMI_IRET to a call instead of a jump; so we have
17130 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17131
17132 void __init vmi_init(void)
17133 {
17134 - if (!vmi_rom)
17135 + if (!vmi_rom.rom_signature)
17136 probe_vmi_rom();
17137 else
17138 - check_vmi_rom(vmi_rom);
17139 + check_vmi_rom(&vmi_rom);
17140
17141 /* In case probing for or validating the ROM failed, basil */
17142 - if (!vmi_rom)
17143 + if (!vmi_rom.rom_signature)
17144 return;
17145
17146 - reserve_top_address(-vmi_rom->virtual_top);
17147 + reserve_top_address(-vmi_rom.virtual_top);
17148
17149 #ifdef CONFIG_X86_IO_APIC
17150 /* This is virtual hardware; timer routing is wired correctly */
17151 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17152 {
17153 unsigned long flags;
17154
17155 - if (!vmi_rom)
17156 + if (!vmi_rom.rom_signature)
17157 return;
17158
17159 local_irq_save(flags);
17160 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S
17161 --- linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17162 +++ linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17163 @@ -26,6 +26,13 @@
17164 #include <asm/page_types.h>
17165 #include <asm/cache.h>
17166 #include <asm/boot.h>
17167 +#include <asm/segment.h>
17168 +
17169 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17170 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17171 +#else
17172 +#define __KERNEL_TEXT_OFFSET 0
17173 +#endif
17174
17175 #undef i386 /* in case the preprocessor is a 32bit one */
17176
17177 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17178 #ifdef CONFIG_X86_32
17179 OUTPUT_ARCH(i386)
17180 ENTRY(phys_startup_32)
17181 -jiffies = jiffies_64;
17182 #else
17183 OUTPUT_ARCH(i386:x86-64)
17184 ENTRY(phys_startup_64)
17185 -jiffies_64 = jiffies;
17186 #endif
17187
17188 PHDRS {
17189 text PT_LOAD FLAGS(5); /* R_E */
17190 - data PT_LOAD FLAGS(7); /* RWE */
17191 +#ifdef CONFIG_X86_32
17192 + module PT_LOAD FLAGS(5); /* R_E */
17193 +#endif
17194 +#ifdef CONFIG_XEN
17195 + rodata PT_LOAD FLAGS(5); /* R_E */
17196 +#else
17197 + rodata PT_LOAD FLAGS(4); /* R__ */
17198 +#endif
17199 + data PT_LOAD FLAGS(6); /* RW_ */
17200 #ifdef CONFIG_X86_64
17201 user PT_LOAD FLAGS(5); /* R_E */
17202 +#endif
17203 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17204 #ifdef CONFIG_SMP
17205 percpu PT_LOAD FLAGS(6); /* RW_ */
17206 #endif
17207 + text.init PT_LOAD FLAGS(5); /* R_E */
17208 + text.exit PT_LOAD FLAGS(5); /* R_E */
17209 init PT_LOAD FLAGS(7); /* RWE */
17210 -#endif
17211 note PT_NOTE FLAGS(0); /* ___ */
17212 }
17213
17214 SECTIONS
17215 {
17216 #ifdef CONFIG_X86_32
17217 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17218 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17219 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17220 #else
17221 - . = __START_KERNEL;
17222 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17223 + . = __START_KERNEL;
17224 #endif
17225
17226 /* Text and read-only data */
17227 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17228 - _text = .;
17229 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17230 /* bootstrapping code */
17231 +#ifdef CONFIG_X86_32
17232 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17233 +#else
17234 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17235 +#endif
17236 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17237 + _text = .;
17238 HEAD_TEXT
17239 #ifdef CONFIG_X86_32
17240 . = ALIGN(PAGE_SIZE);
17241 @@ -82,28 +102,71 @@ SECTIONS
17242 IRQENTRY_TEXT
17243 *(.fixup)
17244 *(.gnu.warning)
17245 - /* End of text section */
17246 - _etext = .;
17247 } :text = 0x9090
17248
17249 - NOTES :text :note
17250 + . += __KERNEL_TEXT_OFFSET;
17251 +
17252 +#ifdef CONFIG_X86_32
17253 + . = ALIGN(PAGE_SIZE);
17254 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17255 + *(.vmi.rom)
17256 + } :module
17257 +
17258 + . = ALIGN(PAGE_SIZE);
17259 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17260 +
17261 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17262 + MODULES_EXEC_VADDR = .;
17263 + BYTE(0)
17264 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17265 + . = ALIGN(HPAGE_SIZE);
17266 + MODULES_EXEC_END = . - 1;
17267 +#endif
17268 +
17269 + } :module
17270 +#endif
17271
17272 - EXCEPTION_TABLE(16) :text = 0x9090
17273 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17274 + /* End of text section */
17275 + _etext = . - __KERNEL_TEXT_OFFSET;
17276 + }
17277 +
17278 +#ifdef CONFIG_X86_32
17279 + . = ALIGN(PAGE_SIZE);
17280 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17281 + *(.idt)
17282 + . = ALIGN(PAGE_SIZE);
17283 + *(.empty_zero_page)
17284 + *(.swapper_pg_fixmap)
17285 + *(.swapper_pg_pmd)
17286 + *(.swapper_pg_dir)
17287 + *(.trampoline_pg_dir)
17288 + } :rodata
17289 +#endif
17290 +
17291 + . = ALIGN(PAGE_SIZE);
17292 + NOTES :rodata :note
17293 +
17294 + EXCEPTION_TABLE(16) :rodata
17295
17296 RO_DATA(PAGE_SIZE)
17297
17298 /* Data */
17299 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17300 +
17301 +#ifdef CONFIG_PAX_KERNEXEC
17302 + . = ALIGN(HPAGE_SIZE);
17303 +#else
17304 + . = ALIGN(PAGE_SIZE);
17305 +#endif
17306 +
17307 /* Start of data section */
17308 _sdata = .;
17309
17310 /* init_task */
17311 INIT_TASK_DATA(THREAD_SIZE)
17312
17313 -#ifdef CONFIG_X86_32
17314 - /* 32 bit has nosave before _edata */
17315 NOSAVE_DATA
17316 -#endif
17317
17318 PAGE_ALIGNED_DATA(PAGE_SIZE)
17319
17320 @@ -112,6 +175,8 @@ SECTIONS
17321 DATA_DATA
17322 CONSTRUCTORS
17323
17324 + jiffies = jiffies_64;
17325 +
17326 /* rarely changed data like cpu maps */
17327 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17328
17329 @@ -166,12 +231,6 @@ SECTIONS
17330 }
17331 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17332
17333 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17334 - .jiffies : AT(VLOAD(.jiffies)) {
17335 - *(.jiffies)
17336 - }
17337 - jiffies = VVIRT(.jiffies);
17338 -
17339 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17340 *(.vsyscall_3)
17341 }
17342 @@ -187,12 +246,19 @@ SECTIONS
17343 #endif /* CONFIG_X86_64 */
17344
17345 /* Init code and data - will be freed after init */
17346 - . = ALIGN(PAGE_SIZE);
17347 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17348 + BYTE(0)
17349 +
17350 +#ifdef CONFIG_PAX_KERNEXEC
17351 + . = ALIGN(HPAGE_SIZE);
17352 +#else
17353 + . = ALIGN(PAGE_SIZE);
17354 +#endif
17355 +
17356 __init_begin = .; /* paired with __init_end */
17357 - }
17358 + } :init.begin
17359
17360 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17361 +#ifdef CONFIG_SMP
17362 /*
17363 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17364 * output PHDR, so the next output section - .init.text - should
17365 @@ -201,12 +267,27 @@ SECTIONS
17366 PERCPU_VADDR(0, :percpu)
17367 #endif
17368
17369 - INIT_TEXT_SECTION(PAGE_SIZE)
17370 -#ifdef CONFIG_X86_64
17371 - :init
17372 -#endif
17373 + . = ALIGN(PAGE_SIZE);
17374 + init_begin = .;
17375 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17376 + VMLINUX_SYMBOL(_sinittext) = .;
17377 + INIT_TEXT
17378 + VMLINUX_SYMBOL(_einittext) = .;
17379 + . = ALIGN(PAGE_SIZE);
17380 + } :text.init
17381
17382 - INIT_DATA_SECTION(16)
17383 + /*
17384 + * .exit.text is discard at runtime, not link time, to deal with
17385 + * references from .altinstructions and .eh_frame
17386 + */
17387 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17388 + EXIT_TEXT
17389 + . = ALIGN(16);
17390 + } :text.exit
17391 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17392 +
17393 + . = ALIGN(PAGE_SIZE);
17394 + INIT_DATA_SECTION(16) :init
17395
17396 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17397 __x86_cpu_dev_start = .;
17398 @@ -232,19 +313,11 @@ SECTIONS
17399 *(.altinstr_replacement)
17400 }
17401
17402 - /*
17403 - * .exit.text is discard at runtime, not link time, to deal with
17404 - * references from .altinstructions and .eh_frame
17405 - */
17406 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17407 - EXIT_TEXT
17408 - }
17409 -
17410 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17411 EXIT_DATA
17412 }
17413
17414 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17415 +#ifndef CONFIG_SMP
17416 PERCPU(PAGE_SIZE)
17417 #endif
17418
17419 @@ -267,12 +340,6 @@ SECTIONS
17420 . = ALIGN(PAGE_SIZE);
17421 }
17422
17423 -#ifdef CONFIG_X86_64
17424 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17425 - NOSAVE_DATA
17426 - }
17427 -#endif
17428 -
17429 /* BSS */
17430 . = ALIGN(PAGE_SIZE);
17431 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17432 @@ -288,6 +355,7 @@ SECTIONS
17433 __brk_base = .;
17434 . += 64 * 1024; /* 64k alignment slop space */
17435 *(.brk_reservation) /* areas brk users have reserved */
17436 + . = ALIGN(HPAGE_SIZE);
17437 __brk_limit = .;
17438 }
17439
17440 @@ -316,13 +384,12 @@ SECTIONS
17441 * for the boot processor.
17442 */
17443 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17444 -INIT_PER_CPU(gdt_page);
17445 INIT_PER_CPU(irq_stack_union);
17446
17447 /*
17448 * Build-time check on the image size:
17449 */
17450 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17451 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17452 "kernel image bigger than KERNEL_IMAGE_SIZE");
17453
17454 #ifdef CONFIG_SMP
17455 diff -urNp linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c
17456 --- linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17457 +++ linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17458 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17459
17460 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17461 /* copy vsyscall data */
17462 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17463 vsyscall_gtod_data.clock.vread = clock->vread;
17464 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17465 vsyscall_gtod_data.clock.mask = clock->mask;
17466 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17467 We do this here because otherwise user space would do it on
17468 its own in a likely inferior way (no access to jiffies).
17469 If you don't like it pass NULL. */
17470 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17471 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17472 p = tcache->blob[1];
17473 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17474 /* Load per CPU data from RDTSCP */
17475 diff -urNp linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c
17476 --- linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17477 +++ linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17478 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17479
17480 EXPORT_SYMBOL(copy_user_generic);
17481 EXPORT_SYMBOL(__copy_user_nocache);
17482 -EXPORT_SYMBOL(copy_from_user);
17483 -EXPORT_SYMBOL(copy_to_user);
17484 EXPORT_SYMBOL(__copy_from_user_inatomic);
17485
17486 EXPORT_SYMBOL(copy_page);
17487 diff -urNp linux-2.6.32.41/arch/x86/kernel/xsave.c linux-2.6.32.41/arch/x86/kernel/xsave.c
17488 --- linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17489 +++ linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17490 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17491 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17492 return -1;
17493
17494 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17495 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17496 fx_sw_user->extended_size -
17497 FP_XSTATE_MAGIC2_SIZE));
17498 /*
17499 @@ -196,7 +196,7 @@ fx_only:
17500 * the other extended state.
17501 */
17502 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17503 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17504 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17505 }
17506
17507 /*
17508 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17509 if (task_thread_info(tsk)->status & TS_XSAVE)
17510 err = restore_user_xstate(buf);
17511 else
17512 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17513 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17514 buf);
17515 if (unlikely(err)) {
17516 /*
17517 diff -urNp linux-2.6.32.41/arch/x86/kvm/emulate.c linux-2.6.32.41/arch/x86/kvm/emulate.c
17518 --- linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17519 +++ linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17520 @@ -81,8 +81,8 @@
17521 #define Src2CL (1<<29)
17522 #define Src2ImmByte (2<<29)
17523 #define Src2One (3<<29)
17524 -#define Src2Imm16 (4<<29)
17525 -#define Src2Mask (7<<29)
17526 +#define Src2Imm16 (4U<<29)
17527 +#define Src2Mask (7U<<29)
17528
17529 enum {
17530 Group1_80, Group1_81, Group1_82, Group1_83,
17531 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17532
17533 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17534 do { \
17535 + unsigned long _tmp; \
17536 __asm__ __volatile__ ( \
17537 _PRE_EFLAGS("0", "4", "2") \
17538 _op _suffix " %"_x"3,%1; " \
17539 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17540 /* Raw emulation: instruction has two explicit operands. */
17541 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17542 do { \
17543 - unsigned long _tmp; \
17544 - \
17545 switch ((_dst).bytes) { \
17546 case 2: \
17547 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17548 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17549
17550 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17551 do { \
17552 - unsigned long _tmp; \
17553 switch ((_dst).bytes) { \
17554 case 1: \
17555 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17556 diff -urNp linux-2.6.32.41/arch/x86/kvm/lapic.c linux-2.6.32.41/arch/x86/kvm/lapic.c
17557 --- linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17558 +++ linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17559 @@ -52,7 +52,7 @@
17560 #define APIC_BUS_CYCLE_NS 1
17561
17562 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17563 -#define apic_debug(fmt, arg...)
17564 +#define apic_debug(fmt, arg...) do {} while (0)
17565
17566 #define APIC_LVT_NUM 6
17567 /* 14 is the version for Xeon and Pentium 8.4.8*/
17568 diff -urNp linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h
17569 --- linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17570 +++ linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17571 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17572 int level = PT_PAGE_TABLE_LEVEL;
17573 unsigned long mmu_seq;
17574
17575 + pax_track_stack();
17576 +
17577 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17578 kvm_mmu_audit(vcpu, "pre page fault");
17579
17580 diff -urNp linux-2.6.32.41/arch/x86/kvm/svm.c linux-2.6.32.41/arch/x86/kvm/svm.c
17581 --- linux-2.6.32.41/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17582 +++ linux-2.6.32.41/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17583 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17584 static void reload_tss(struct kvm_vcpu *vcpu)
17585 {
17586 int cpu = raw_smp_processor_id();
17587 -
17588 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17589 +
17590 + pax_open_kernel();
17591 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17592 + pax_close_kernel();
17593 +
17594 load_TR_desc();
17595 }
17596
17597 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17598 return true;
17599 }
17600
17601 -static struct kvm_x86_ops svm_x86_ops = {
17602 +static const struct kvm_x86_ops svm_x86_ops = {
17603 .cpu_has_kvm_support = has_svm,
17604 .disabled_by_bios = is_disabled,
17605 .hardware_setup = svm_hardware_setup,
17606 diff -urNp linux-2.6.32.41/arch/x86/kvm/vmx.c linux-2.6.32.41/arch/x86/kvm/vmx.c
17607 --- linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17608 +++ linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17609 @@ -570,7 +570,11 @@ static void reload_tss(void)
17610
17611 kvm_get_gdt(&gdt);
17612 descs = (void *)gdt.base;
17613 +
17614 + pax_open_kernel();
17615 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17616 + pax_close_kernel();
17617 +
17618 load_TR_desc();
17619 }
17620
17621 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17622 if (!cpu_has_vmx_flexpriority())
17623 flexpriority_enabled = 0;
17624
17625 - if (!cpu_has_vmx_tpr_shadow())
17626 - kvm_x86_ops->update_cr8_intercept = NULL;
17627 + if (!cpu_has_vmx_tpr_shadow()) {
17628 + pax_open_kernel();
17629 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17630 + pax_close_kernel();
17631 + }
17632
17633 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17634 kvm_disable_largepages();
17635 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17636 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17637
17638 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17639 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17640 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17641 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17642 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17643 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17644 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17645 "jmp .Lkvm_vmx_return \n\t"
17646 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17647 ".Lkvm_vmx_return: "
17648 +
17649 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17650 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17651 + ".Lkvm_vmx_return2: "
17652 +#endif
17653 +
17654 /* Save guest registers, load host registers, keep flags */
17655 "xchg %0, (%%"R"sp) \n\t"
17656 "mov %%"R"ax, %c[rax](%0) \n\t"
17657 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17658 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17659 #endif
17660 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17661 +
17662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17663 + ,[cs]"i"(__KERNEL_CS)
17664 +#endif
17665 +
17666 : "cc", "memory"
17667 - , R"bx", R"di", R"si"
17668 + , R"ax", R"bx", R"di", R"si"
17669 #ifdef CONFIG_X86_64
17670 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17671 #endif
17672 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17673 if (vmx->rmode.irq.pending)
17674 fixup_rmode_irq(vmx);
17675
17676 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17677 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17678 +
17679 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17680 + loadsegment(fs, __KERNEL_PERCPU);
17681 +#endif
17682 +
17683 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17684 + __set_fs(current_thread_info()->addr_limit);
17685 +#endif
17686 +
17687 vmx->launched = 1;
17688
17689 vmx_complete_interrupts(vmx);
17690 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17691 return false;
17692 }
17693
17694 -static struct kvm_x86_ops vmx_x86_ops = {
17695 +static const struct kvm_x86_ops vmx_x86_ops = {
17696 .cpu_has_kvm_support = cpu_has_kvm_support,
17697 .disabled_by_bios = vmx_disabled_by_bios,
17698 .hardware_setup = hardware_setup,
17699 diff -urNp linux-2.6.32.41/arch/x86/kvm/x86.c linux-2.6.32.41/arch/x86/kvm/x86.c
17700 --- linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17701 +++ linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17702 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17703 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17704 struct kvm_cpuid_entry2 __user *entries);
17705
17706 -struct kvm_x86_ops *kvm_x86_ops;
17707 +const struct kvm_x86_ops *kvm_x86_ops;
17708 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17709
17710 int ignore_msrs = 0;
17711 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17712 struct kvm_cpuid2 *cpuid,
17713 struct kvm_cpuid_entry2 __user *entries)
17714 {
17715 - int r;
17716 + int r, i;
17717
17718 r = -E2BIG;
17719 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17720 goto out;
17721 r = -EFAULT;
17722 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17723 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17724 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17725 goto out;
17726 + for (i = 0; i < cpuid->nent; ++i) {
17727 + struct kvm_cpuid_entry2 cpuid_entry;
17728 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17729 + goto out;
17730 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17731 + }
17732 vcpu->arch.cpuid_nent = cpuid->nent;
17733 kvm_apic_set_version(vcpu);
17734 return 0;
17735 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17736 struct kvm_cpuid2 *cpuid,
17737 struct kvm_cpuid_entry2 __user *entries)
17738 {
17739 - int r;
17740 + int r, i;
17741
17742 vcpu_load(vcpu);
17743 r = -E2BIG;
17744 if (cpuid->nent < vcpu->arch.cpuid_nent)
17745 goto out;
17746 r = -EFAULT;
17747 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17748 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17749 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17750 goto out;
17751 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17752 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17753 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17754 + goto out;
17755 + }
17756 return 0;
17757
17758 out:
17759 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17760 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17761 struct kvm_interrupt *irq)
17762 {
17763 - if (irq->irq < 0 || irq->irq >= 256)
17764 + if (irq->irq >= 256)
17765 return -EINVAL;
17766 if (irqchip_in_kernel(vcpu->kvm))
17767 return -ENXIO;
17768 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17769 .notifier_call = kvmclock_cpufreq_notifier
17770 };
17771
17772 -int kvm_arch_init(void *opaque)
17773 +int kvm_arch_init(const void *opaque)
17774 {
17775 int r, cpu;
17776 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17777 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17778
17779 if (kvm_x86_ops) {
17780 printk(KERN_ERR "kvm: already loaded the other module\n");
17781 diff -urNp linux-2.6.32.41/arch/x86/lib/atomic64_32.c linux-2.6.32.41/arch/x86/lib/atomic64_32.c
17782 --- linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17783 +++ linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17784 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17785 }
17786 EXPORT_SYMBOL(atomic64_cmpxchg);
17787
17788 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17789 +{
17790 + return cmpxchg8b(&ptr->counter, old_val, new_val);
17791 +}
17792 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17793 +
17794 /**
17795 * atomic64_xchg - xchg atomic64 variable
17796 * @ptr: pointer to type atomic64_t
17797 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17798 EXPORT_SYMBOL(atomic64_xchg);
17799
17800 /**
17801 + * atomic64_xchg_unchecked - xchg atomic64 variable
17802 + * @ptr: pointer to type atomic64_unchecked_t
17803 + * @new_val: value to assign
17804 + *
17805 + * Atomically xchgs the value of @ptr to @new_val and returns
17806 + * the old value.
17807 + */
17808 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17809 +{
17810 + /*
17811 + * Try first with a (possibly incorrect) assumption about
17812 + * what we have there. We'll do two loops most likely,
17813 + * but we'll get an ownership MESI transaction straight away
17814 + * instead of a read transaction followed by a
17815 + * flush-for-ownership transaction:
17816 + */
17817 + u64 old_val, real_val = 0;
17818 +
17819 + do {
17820 + old_val = real_val;
17821 +
17822 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17823 +
17824 + } while (real_val != old_val);
17825 +
17826 + return old_val;
17827 +}
17828 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
17829 +
17830 +/**
17831 * atomic64_set - set atomic64 variable
17832 * @ptr: pointer to type atomic64_t
17833 * @new_val: value to assign
17834 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17835 EXPORT_SYMBOL(atomic64_set);
17836
17837 /**
17838 -EXPORT_SYMBOL(atomic64_read);
17839 + * atomic64_unchecked_set - set atomic64 variable
17840 + * @ptr: pointer to type atomic64_unchecked_t
17841 + * @new_val: value to assign
17842 + *
17843 + * Atomically sets the value of @ptr to @new_val.
17844 + */
17845 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17846 +{
17847 + atomic64_xchg_unchecked(ptr, new_val);
17848 +}
17849 +EXPORT_SYMBOL(atomic64_set_unchecked);
17850 +
17851 +/**
17852 * atomic64_add_return - add and return
17853 * @delta: integer value to add
17854 * @ptr: pointer to type atomic64_t
17855 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17856 }
17857 EXPORT_SYMBOL(atomic64_add_return);
17858
17859 +/**
17860 + * atomic64_add_return_unchecked - add and return
17861 + * @delta: integer value to add
17862 + * @ptr: pointer to type atomic64_unchecked_t
17863 + *
17864 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
17865 + */
17866 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17867 +{
17868 + /*
17869 + * Try first with a (possibly incorrect) assumption about
17870 + * what we have there. We'll do two loops most likely,
17871 + * but we'll get an ownership MESI transaction straight away
17872 + * instead of a read transaction followed by a
17873 + * flush-for-ownership transaction:
17874 + */
17875 + u64 old_val, new_val, real_val = 0;
17876 +
17877 + do {
17878 + old_val = real_val;
17879 + new_val = old_val + delta;
17880 +
17881 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17882 +
17883 + } while (real_val != old_val);
17884 +
17885 + return new_val;
17886 +}
17887 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
17888 +
17889 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17890 {
17891 return atomic64_add_return(-delta, ptr);
17892 }
17893 EXPORT_SYMBOL(atomic64_sub_return);
17894
17895 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17896 +{
17897 + return atomic64_add_return_unchecked(-delta, ptr);
17898 +}
17899 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17900 +
17901 u64 atomic64_inc_return(atomic64_t *ptr)
17902 {
17903 return atomic64_add_return(1, ptr);
17904 }
17905 EXPORT_SYMBOL(atomic64_inc_return);
17906
17907 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17908 +{
17909 + return atomic64_add_return_unchecked(1, ptr);
17910 +}
17911 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17912 +
17913 u64 atomic64_dec_return(atomic64_t *ptr)
17914 {
17915 return atomic64_sub_return(1, ptr);
17916 }
17917 EXPORT_SYMBOL(atomic64_dec_return);
17918
17919 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
17920 +{
17921 + return atomic64_sub_return_unchecked(1, ptr);
17922 +}
17923 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
17924 +
17925 /**
17926 * atomic64_add - add integer to atomic64 variable
17927 * @delta: integer value to add
17928 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
17929 EXPORT_SYMBOL(atomic64_add);
17930
17931 /**
17932 + * atomic64_add_unchecked - add integer to atomic64 variable
17933 + * @delta: integer value to add
17934 + * @ptr: pointer to type atomic64_unchecked_t
17935 + *
17936 + * Atomically adds @delta to @ptr.
17937 + */
17938 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17939 +{
17940 + atomic64_add_return_unchecked(delta, ptr);
17941 +}
17942 +EXPORT_SYMBOL(atomic64_add_unchecked);
17943 +
17944 +/**
17945 * atomic64_sub - subtract the atomic64 variable
17946 * @delta: integer value to subtract
17947 * @ptr: pointer to type atomic64_t
17948 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
17949 EXPORT_SYMBOL(atomic64_sub);
17950
17951 /**
17952 + * atomic64_sub_unchecked - subtract the atomic64 variable
17953 + * @delta: integer value to subtract
17954 + * @ptr: pointer to type atomic64_unchecked_t
17955 + *
17956 + * Atomically subtracts @delta from @ptr.
17957 + */
17958 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17959 +{
17960 + atomic64_add_unchecked(-delta, ptr);
17961 +}
17962 +EXPORT_SYMBOL(atomic64_sub_unchecked);
17963 +
17964 +/**
17965 * atomic64_sub_and_test - subtract value from variable and test result
17966 * @delta: integer value to subtract
17967 * @ptr: pointer to type atomic64_t
17968 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
17969 EXPORT_SYMBOL(atomic64_inc);
17970
17971 /**
17972 + * atomic64_inc_unchecked - increment atomic64 variable
17973 + * @ptr: pointer to type atomic64_unchecked_t
17974 + *
17975 + * Atomically increments @ptr by 1.
17976 + */
17977 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
17978 +{
17979 + atomic64_add_unchecked(1, ptr);
17980 +}
17981 +EXPORT_SYMBOL(atomic64_inc_unchecked);
17982 +
17983 +/**
17984 * atomic64_dec - decrement atomic64 variable
17985 * @ptr: pointer to type atomic64_t
17986 *
17987 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
17988 EXPORT_SYMBOL(atomic64_dec);
17989
17990 /**
17991 + * atomic64_dec_unchecked - decrement atomic64 variable
17992 + * @ptr: pointer to type atomic64_unchecked_t
17993 + *
17994 + * Atomically decrements @ptr by 1.
17995 + */
17996 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
17997 +{
17998 + atomic64_sub_unchecked(1, ptr);
17999 +}
18000 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18001 +
18002 +/**
18003 * atomic64_dec_and_test - decrement and test
18004 * @ptr: pointer to type atomic64_t
18005 *
18006 diff -urNp linux-2.6.32.41/arch/x86/lib/checksum_32.S linux-2.6.32.41/arch/x86/lib/checksum_32.S
18007 --- linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18008 +++ linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18009 @@ -28,7 +28,8 @@
18010 #include <linux/linkage.h>
18011 #include <asm/dwarf2.h>
18012 #include <asm/errno.h>
18013 -
18014 +#include <asm/segment.h>
18015 +
18016 /*
18017 * computes a partial checksum, e.g. for TCP/UDP fragments
18018 */
18019 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18020
18021 #define ARGBASE 16
18022 #define FP 12
18023 -
18024 -ENTRY(csum_partial_copy_generic)
18025 +
18026 +ENTRY(csum_partial_copy_generic_to_user)
18027 CFI_STARTPROC
18028 +
18029 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18030 + pushl %gs
18031 + CFI_ADJUST_CFA_OFFSET 4
18032 + popl %es
18033 + CFI_ADJUST_CFA_OFFSET -4
18034 + jmp csum_partial_copy_generic
18035 +#endif
18036 +
18037 +ENTRY(csum_partial_copy_generic_from_user)
18038 +
18039 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18040 + pushl %gs
18041 + CFI_ADJUST_CFA_OFFSET 4
18042 + popl %ds
18043 + CFI_ADJUST_CFA_OFFSET -4
18044 +#endif
18045 +
18046 +ENTRY(csum_partial_copy_generic)
18047 subl $4,%esp
18048 CFI_ADJUST_CFA_OFFSET 4
18049 pushl %edi
18050 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18051 jmp 4f
18052 SRC(1: movw (%esi), %bx )
18053 addl $2, %esi
18054 -DST( movw %bx, (%edi) )
18055 +DST( movw %bx, %es:(%edi) )
18056 addl $2, %edi
18057 addw %bx, %ax
18058 adcl $0, %eax
18059 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18060 SRC(1: movl (%esi), %ebx )
18061 SRC( movl 4(%esi), %edx )
18062 adcl %ebx, %eax
18063 -DST( movl %ebx, (%edi) )
18064 +DST( movl %ebx, %es:(%edi) )
18065 adcl %edx, %eax
18066 -DST( movl %edx, 4(%edi) )
18067 +DST( movl %edx, %es:4(%edi) )
18068
18069 SRC( movl 8(%esi), %ebx )
18070 SRC( movl 12(%esi), %edx )
18071 adcl %ebx, %eax
18072 -DST( movl %ebx, 8(%edi) )
18073 +DST( movl %ebx, %es:8(%edi) )
18074 adcl %edx, %eax
18075 -DST( movl %edx, 12(%edi) )
18076 +DST( movl %edx, %es:12(%edi) )
18077
18078 SRC( movl 16(%esi), %ebx )
18079 SRC( movl 20(%esi), %edx )
18080 adcl %ebx, %eax
18081 -DST( movl %ebx, 16(%edi) )
18082 +DST( movl %ebx, %es:16(%edi) )
18083 adcl %edx, %eax
18084 -DST( movl %edx, 20(%edi) )
18085 +DST( movl %edx, %es:20(%edi) )
18086
18087 SRC( movl 24(%esi), %ebx )
18088 SRC( movl 28(%esi), %edx )
18089 adcl %ebx, %eax
18090 -DST( movl %ebx, 24(%edi) )
18091 +DST( movl %ebx, %es:24(%edi) )
18092 adcl %edx, %eax
18093 -DST( movl %edx, 28(%edi) )
18094 +DST( movl %edx, %es:28(%edi) )
18095
18096 lea 32(%esi), %esi
18097 lea 32(%edi), %edi
18098 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18099 shrl $2, %edx # This clears CF
18100 SRC(3: movl (%esi), %ebx )
18101 adcl %ebx, %eax
18102 -DST( movl %ebx, (%edi) )
18103 +DST( movl %ebx, %es:(%edi) )
18104 lea 4(%esi), %esi
18105 lea 4(%edi), %edi
18106 dec %edx
18107 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18108 jb 5f
18109 SRC( movw (%esi), %cx )
18110 leal 2(%esi), %esi
18111 -DST( movw %cx, (%edi) )
18112 +DST( movw %cx, %es:(%edi) )
18113 leal 2(%edi), %edi
18114 je 6f
18115 shll $16,%ecx
18116 SRC(5: movb (%esi), %cl )
18117 -DST( movb %cl, (%edi) )
18118 +DST( movb %cl, %es:(%edi) )
18119 6: addl %ecx, %eax
18120 adcl $0, %eax
18121 7:
18122 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18123
18124 6001:
18125 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18126 - movl $-EFAULT, (%ebx)
18127 + movl $-EFAULT, %ss:(%ebx)
18128
18129 # zero the complete destination - computing the rest
18130 # is too much work
18131 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18132
18133 6002:
18134 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18135 - movl $-EFAULT,(%ebx)
18136 + movl $-EFAULT,%ss:(%ebx)
18137 jmp 5000b
18138
18139 .previous
18140
18141 + pushl %ss
18142 + CFI_ADJUST_CFA_OFFSET 4
18143 + popl %ds
18144 + CFI_ADJUST_CFA_OFFSET -4
18145 + pushl %ss
18146 + CFI_ADJUST_CFA_OFFSET 4
18147 + popl %es
18148 + CFI_ADJUST_CFA_OFFSET -4
18149 popl %ebx
18150 CFI_ADJUST_CFA_OFFSET -4
18151 CFI_RESTORE ebx
18152 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18153 CFI_ADJUST_CFA_OFFSET -4
18154 ret
18155 CFI_ENDPROC
18156 -ENDPROC(csum_partial_copy_generic)
18157 +ENDPROC(csum_partial_copy_generic_to_user)
18158
18159 #else
18160
18161 /* Version for PentiumII/PPro */
18162
18163 #define ROUND1(x) \
18164 + nop; nop; nop; \
18165 SRC(movl x(%esi), %ebx ) ; \
18166 addl %ebx, %eax ; \
18167 - DST(movl %ebx, x(%edi) ) ;
18168 + DST(movl %ebx, %es:x(%edi)) ;
18169
18170 #define ROUND(x) \
18171 + nop; nop; nop; \
18172 SRC(movl x(%esi), %ebx ) ; \
18173 adcl %ebx, %eax ; \
18174 - DST(movl %ebx, x(%edi) ) ;
18175 + DST(movl %ebx, %es:x(%edi)) ;
18176
18177 #define ARGBASE 12
18178 -
18179 -ENTRY(csum_partial_copy_generic)
18180 +
18181 +ENTRY(csum_partial_copy_generic_to_user)
18182 CFI_STARTPROC
18183 +
18184 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18185 + pushl %gs
18186 + CFI_ADJUST_CFA_OFFSET 4
18187 + popl %es
18188 + CFI_ADJUST_CFA_OFFSET -4
18189 + jmp csum_partial_copy_generic
18190 +#endif
18191 +
18192 +ENTRY(csum_partial_copy_generic_from_user)
18193 +
18194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18195 + pushl %gs
18196 + CFI_ADJUST_CFA_OFFSET 4
18197 + popl %ds
18198 + CFI_ADJUST_CFA_OFFSET -4
18199 +#endif
18200 +
18201 +ENTRY(csum_partial_copy_generic)
18202 pushl %ebx
18203 CFI_ADJUST_CFA_OFFSET 4
18204 CFI_REL_OFFSET ebx, 0
18205 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18206 subl %ebx, %edi
18207 lea -1(%esi),%edx
18208 andl $-32,%edx
18209 - lea 3f(%ebx,%ebx), %ebx
18210 + lea 3f(%ebx,%ebx,2), %ebx
18211 testl %esi, %esi
18212 jmp *%ebx
18213 1: addl $64,%esi
18214 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18215 jb 5f
18216 SRC( movw (%esi), %dx )
18217 leal 2(%esi), %esi
18218 -DST( movw %dx, (%edi) )
18219 +DST( movw %dx, %es:(%edi) )
18220 leal 2(%edi), %edi
18221 je 6f
18222 shll $16,%edx
18223 5:
18224 SRC( movb (%esi), %dl )
18225 -DST( movb %dl, (%edi) )
18226 +DST( movb %dl, %es:(%edi) )
18227 6: addl %edx, %eax
18228 adcl $0, %eax
18229 7:
18230 .section .fixup, "ax"
18231 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18232 - movl $-EFAULT, (%ebx)
18233 + movl $-EFAULT, %ss:(%ebx)
18234 # zero the complete destination (computing the rest is too much work)
18235 movl ARGBASE+8(%esp),%edi # dst
18236 movl ARGBASE+12(%esp),%ecx # len
18237 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18238 rep; stosb
18239 jmp 7b
18240 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18241 - movl $-EFAULT, (%ebx)
18242 + movl $-EFAULT, %ss:(%ebx)
18243 jmp 7b
18244 .previous
18245
18246 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18247 + pushl %ss
18248 + CFI_ADJUST_CFA_OFFSET 4
18249 + popl %ds
18250 + CFI_ADJUST_CFA_OFFSET -4
18251 + pushl %ss
18252 + CFI_ADJUST_CFA_OFFSET 4
18253 + popl %es
18254 + CFI_ADJUST_CFA_OFFSET -4
18255 +#endif
18256 +
18257 popl %esi
18258 CFI_ADJUST_CFA_OFFSET -4
18259 CFI_RESTORE esi
18260 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18261 CFI_RESTORE ebx
18262 ret
18263 CFI_ENDPROC
18264 -ENDPROC(csum_partial_copy_generic)
18265 +ENDPROC(csum_partial_copy_generic_to_user)
18266
18267 #undef ROUND
18268 #undef ROUND1
18269 diff -urNp linux-2.6.32.41/arch/x86/lib/clear_page_64.S linux-2.6.32.41/arch/x86/lib/clear_page_64.S
18270 --- linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18271 +++ linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18272 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18273
18274 #include <asm/cpufeature.h>
18275
18276 - .section .altinstr_replacement,"ax"
18277 + .section .altinstr_replacement,"a"
18278 1: .byte 0xeb /* jmp <disp8> */
18279 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18280 2:
18281 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_page_64.S linux-2.6.32.41/arch/x86/lib/copy_page_64.S
18282 --- linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18283 +++ linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18284 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18285
18286 #include <asm/cpufeature.h>
18287
18288 - .section .altinstr_replacement,"ax"
18289 + .section .altinstr_replacement,"a"
18290 1: .byte 0xeb /* jmp <disp8> */
18291 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18292 2:
18293 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_64.S linux-2.6.32.41/arch/x86/lib/copy_user_64.S
18294 --- linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-03-27 14:31:47.000000000 -0400
18295 +++ linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-04-17 15:56:46.000000000 -0400
18296 @@ -15,13 +15,14 @@
18297 #include <asm/asm-offsets.h>
18298 #include <asm/thread_info.h>
18299 #include <asm/cpufeature.h>
18300 +#include <asm/pgtable.h>
18301
18302 .macro ALTERNATIVE_JUMP feature,orig,alt
18303 0:
18304 .byte 0xe9 /* 32bit jump */
18305 .long \orig-1f /* by default jump to orig */
18306 1:
18307 - .section .altinstr_replacement,"ax"
18308 + .section .altinstr_replacement,"a"
18309 2: .byte 0xe9 /* near jump with 32bit immediate */
18310 .long \alt-1b /* offset */ /* or alternatively to alt */
18311 .previous
18312 @@ -64,49 +65,19 @@
18313 #endif
18314 .endm
18315
18316 -/* Standard copy_to_user with segment limit checking */
18317 -ENTRY(copy_to_user)
18318 - CFI_STARTPROC
18319 - GET_THREAD_INFO(%rax)
18320 - movq %rdi,%rcx
18321 - addq %rdx,%rcx
18322 - jc bad_to_user
18323 - cmpq TI_addr_limit(%rax),%rcx
18324 - jae bad_to_user
18325 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18326 - CFI_ENDPROC
18327 -ENDPROC(copy_to_user)
18328 -
18329 -/* Standard copy_from_user with segment limit checking */
18330 -ENTRY(copy_from_user)
18331 - CFI_STARTPROC
18332 - GET_THREAD_INFO(%rax)
18333 - movq %rsi,%rcx
18334 - addq %rdx,%rcx
18335 - jc bad_from_user
18336 - cmpq TI_addr_limit(%rax),%rcx
18337 - jae bad_from_user
18338 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18339 - CFI_ENDPROC
18340 -ENDPROC(copy_from_user)
18341 -
18342 ENTRY(copy_user_generic)
18343 CFI_STARTPROC
18344 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18345 CFI_ENDPROC
18346 ENDPROC(copy_user_generic)
18347
18348 -ENTRY(__copy_from_user_inatomic)
18349 - CFI_STARTPROC
18350 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18351 - CFI_ENDPROC
18352 -ENDPROC(__copy_from_user_inatomic)
18353 -
18354 .section .fixup,"ax"
18355 /* must zero dest */
18356 ENTRY(bad_from_user)
18357 bad_from_user:
18358 CFI_STARTPROC
18359 + testl %edx,%edx
18360 + js bad_to_user
18361 movl %edx,%ecx
18362 xorl %eax,%eax
18363 rep
18364 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S
18365 --- linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18366 +++ linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18367 @@ -14,6 +14,7 @@
18368 #include <asm/current.h>
18369 #include <asm/asm-offsets.h>
18370 #include <asm/thread_info.h>
18371 +#include <asm/pgtable.h>
18372
18373 .macro ALIGN_DESTINATION
18374 #ifdef FIX_ALIGNMENT
18375 @@ -50,6 +51,15 @@
18376 */
18377 ENTRY(__copy_user_nocache)
18378 CFI_STARTPROC
18379 +
18380 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18381 + mov $PAX_USER_SHADOW_BASE,%rcx
18382 + cmp %rcx,%rsi
18383 + jae 1f
18384 + add %rcx,%rsi
18385 +1:
18386 +#endif
18387 +
18388 cmpl $8,%edx
18389 jb 20f /* less then 8 bytes, go to byte copy loop */
18390 ALIGN_DESTINATION
18391 diff -urNp linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c
18392 --- linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18393 +++ linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18394 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18395 len -= 2;
18396 }
18397 }
18398 +
18399 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18400 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18401 + src += PAX_USER_SHADOW_BASE;
18402 +#endif
18403 +
18404 isum = csum_partial_copy_generic((__force const void *)src,
18405 dst, len, isum, errp, NULL);
18406 if (unlikely(*errp))
18407 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18408 }
18409
18410 *errp = 0;
18411 +
18412 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18413 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18414 + dst += PAX_USER_SHADOW_BASE;
18415 +#endif
18416 +
18417 return csum_partial_copy_generic(src, (void __force *)dst,
18418 len, isum, NULL, errp);
18419 }
18420 diff -urNp linux-2.6.32.41/arch/x86/lib/getuser.S linux-2.6.32.41/arch/x86/lib/getuser.S
18421 --- linux-2.6.32.41/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18422 +++ linux-2.6.32.41/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18423 @@ -33,14 +33,35 @@
18424 #include <asm/asm-offsets.h>
18425 #include <asm/thread_info.h>
18426 #include <asm/asm.h>
18427 +#include <asm/segment.h>
18428 +#include <asm/pgtable.h>
18429 +
18430 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18431 +#define __copyuser_seg gs;
18432 +#else
18433 +#define __copyuser_seg
18434 +#endif
18435
18436 .text
18437 ENTRY(__get_user_1)
18438 CFI_STARTPROC
18439 +
18440 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18441 GET_THREAD_INFO(%_ASM_DX)
18442 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18443 jae bad_get_user
18444 -1: movzb (%_ASM_AX),%edx
18445 +
18446 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18447 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18448 + cmp %_ASM_DX,%_ASM_AX
18449 + jae 1234f
18450 + add %_ASM_DX,%_ASM_AX
18451 +1234:
18452 +#endif
18453 +
18454 +#endif
18455 +
18456 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18457 xor %eax,%eax
18458 ret
18459 CFI_ENDPROC
18460 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18461 ENTRY(__get_user_2)
18462 CFI_STARTPROC
18463 add $1,%_ASM_AX
18464 +
18465 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18466 jc bad_get_user
18467 GET_THREAD_INFO(%_ASM_DX)
18468 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18469 jae bad_get_user
18470 -2: movzwl -1(%_ASM_AX),%edx
18471 +
18472 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18473 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18474 + cmp %_ASM_DX,%_ASM_AX
18475 + jae 1234f
18476 + add %_ASM_DX,%_ASM_AX
18477 +1234:
18478 +#endif
18479 +
18480 +#endif
18481 +
18482 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18483 xor %eax,%eax
18484 ret
18485 CFI_ENDPROC
18486 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18487 ENTRY(__get_user_4)
18488 CFI_STARTPROC
18489 add $3,%_ASM_AX
18490 +
18491 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18492 jc bad_get_user
18493 GET_THREAD_INFO(%_ASM_DX)
18494 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18495 jae bad_get_user
18496 -3: mov -3(%_ASM_AX),%edx
18497 +
18498 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18499 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18500 + cmp %_ASM_DX,%_ASM_AX
18501 + jae 1234f
18502 + add %_ASM_DX,%_ASM_AX
18503 +1234:
18504 +#endif
18505 +
18506 +#endif
18507 +
18508 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18509 xor %eax,%eax
18510 ret
18511 CFI_ENDPROC
18512 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18513 GET_THREAD_INFO(%_ASM_DX)
18514 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18515 jae bad_get_user
18516 +
18517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18518 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18519 + cmp %_ASM_DX,%_ASM_AX
18520 + jae 1234f
18521 + add %_ASM_DX,%_ASM_AX
18522 +1234:
18523 +#endif
18524 +
18525 4: movq -7(%_ASM_AX),%_ASM_DX
18526 xor %eax,%eax
18527 ret
18528 diff -urNp linux-2.6.32.41/arch/x86/lib/memcpy_64.S linux-2.6.32.41/arch/x86/lib/memcpy_64.S
18529 --- linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18530 +++ linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18531 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18532 * It is also a lot simpler. Use this when possible:
18533 */
18534
18535 - .section .altinstr_replacement, "ax"
18536 + .section .altinstr_replacement, "a"
18537 1: .byte 0xeb /* jmp <disp8> */
18538 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18539 2:
18540 diff -urNp linux-2.6.32.41/arch/x86/lib/memset_64.S linux-2.6.32.41/arch/x86/lib/memset_64.S
18541 --- linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18542 +++ linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18543 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18544
18545 #include <asm/cpufeature.h>
18546
18547 - .section .altinstr_replacement,"ax"
18548 + .section .altinstr_replacement,"a"
18549 1: .byte 0xeb /* jmp <disp8> */
18550 .byte (memset_c - memset) - (2f - 1b) /* offset */
18551 2:
18552 diff -urNp linux-2.6.32.41/arch/x86/lib/mmx_32.c linux-2.6.32.41/arch/x86/lib/mmx_32.c
18553 --- linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18554 +++ linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18555 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18556 {
18557 void *p;
18558 int i;
18559 + unsigned long cr0;
18560
18561 if (unlikely(in_interrupt()))
18562 return __memcpy(to, from, len);
18563 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18564 kernel_fpu_begin();
18565
18566 __asm__ __volatile__ (
18567 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18568 - " prefetch 64(%0)\n"
18569 - " prefetch 128(%0)\n"
18570 - " prefetch 192(%0)\n"
18571 - " prefetch 256(%0)\n"
18572 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18573 + " prefetch 64(%1)\n"
18574 + " prefetch 128(%1)\n"
18575 + " prefetch 192(%1)\n"
18576 + " prefetch 256(%1)\n"
18577 "2: \n"
18578 ".section .fixup, \"ax\"\n"
18579 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18580 + "3: \n"
18581 +
18582 +#ifdef CONFIG_PAX_KERNEXEC
18583 + " movl %%cr0, %0\n"
18584 + " movl %0, %%eax\n"
18585 + " andl $0xFFFEFFFF, %%eax\n"
18586 + " movl %%eax, %%cr0\n"
18587 +#endif
18588 +
18589 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18590 +
18591 +#ifdef CONFIG_PAX_KERNEXEC
18592 + " movl %0, %%cr0\n"
18593 +#endif
18594 +
18595 " jmp 2b\n"
18596 ".previous\n"
18597 _ASM_EXTABLE(1b, 3b)
18598 - : : "r" (from));
18599 + : "=&r" (cr0) : "r" (from) : "ax");
18600
18601 for ( ; i > 5; i--) {
18602 __asm__ __volatile__ (
18603 - "1: prefetch 320(%0)\n"
18604 - "2: movq (%0), %%mm0\n"
18605 - " movq 8(%0), %%mm1\n"
18606 - " movq 16(%0), %%mm2\n"
18607 - " movq 24(%0), %%mm3\n"
18608 - " movq %%mm0, (%1)\n"
18609 - " movq %%mm1, 8(%1)\n"
18610 - " movq %%mm2, 16(%1)\n"
18611 - " movq %%mm3, 24(%1)\n"
18612 - " movq 32(%0), %%mm0\n"
18613 - " movq 40(%0), %%mm1\n"
18614 - " movq 48(%0), %%mm2\n"
18615 - " movq 56(%0), %%mm3\n"
18616 - " movq %%mm0, 32(%1)\n"
18617 - " movq %%mm1, 40(%1)\n"
18618 - " movq %%mm2, 48(%1)\n"
18619 - " movq %%mm3, 56(%1)\n"
18620 + "1: prefetch 320(%1)\n"
18621 + "2: movq (%1), %%mm0\n"
18622 + " movq 8(%1), %%mm1\n"
18623 + " movq 16(%1), %%mm2\n"
18624 + " movq 24(%1), %%mm3\n"
18625 + " movq %%mm0, (%2)\n"
18626 + " movq %%mm1, 8(%2)\n"
18627 + " movq %%mm2, 16(%2)\n"
18628 + " movq %%mm3, 24(%2)\n"
18629 + " movq 32(%1), %%mm0\n"
18630 + " movq 40(%1), %%mm1\n"
18631 + " movq 48(%1), %%mm2\n"
18632 + " movq 56(%1), %%mm3\n"
18633 + " movq %%mm0, 32(%2)\n"
18634 + " movq %%mm1, 40(%2)\n"
18635 + " movq %%mm2, 48(%2)\n"
18636 + " movq %%mm3, 56(%2)\n"
18637 ".section .fixup, \"ax\"\n"
18638 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18639 + "3:\n"
18640 +
18641 +#ifdef CONFIG_PAX_KERNEXEC
18642 + " movl %%cr0, %0\n"
18643 + " movl %0, %%eax\n"
18644 + " andl $0xFFFEFFFF, %%eax\n"
18645 + " movl %%eax, %%cr0\n"
18646 +#endif
18647 +
18648 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18649 +
18650 +#ifdef CONFIG_PAX_KERNEXEC
18651 + " movl %0, %%cr0\n"
18652 +#endif
18653 +
18654 " jmp 2b\n"
18655 ".previous\n"
18656 _ASM_EXTABLE(1b, 3b)
18657 - : : "r" (from), "r" (to) : "memory");
18658 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18659
18660 from += 64;
18661 to += 64;
18662 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18663 static void fast_copy_page(void *to, void *from)
18664 {
18665 int i;
18666 + unsigned long cr0;
18667
18668 kernel_fpu_begin();
18669
18670 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18671 * but that is for later. -AV
18672 */
18673 __asm__ __volatile__(
18674 - "1: prefetch (%0)\n"
18675 - " prefetch 64(%0)\n"
18676 - " prefetch 128(%0)\n"
18677 - " prefetch 192(%0)\n"
18678 - " prefetch 256(%0)\n"
18679 + "1: prefetch (%1)\n"
18680 + " prefetch 64(%1)\n"
18681 + " prefetch 128(%1)\n"
18682 + " prefetch 192(%1)\n"
18683 + " prefetch 256(%1)\n"
18684 "2: \n"
18685 ".section .fixup, \"ax\"\n"
18686 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18687 + "3: \n"
18688 +
18689 +#ifdef CONFIG_PAX_KERNEXEC
18690 + " movl %%cr0, %0\n"
18691 + " movl %0, %%eax\n"
18692 + " andl $0xFFFEFFFF, %%eax\n"
18693 + " movl %%eax, %%cr0\n"
18694 +#endif
18695 +
18696 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18697 +
18698 +#ifdef CONFIG_PAX_KERNEXEC
18699 + " movl %0, %%cr0\n"
18700 +#endif
18701 +
18702 " jmp 2b\n"
18703 ".previous\n"
18704 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18705 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18706
18707 for (i = 0; i < (4096-320)/64; i++) {
18708 __asm__ __volatile__ (
18709 - "1: prefetch 320(%0)\n"
18710 - "2: movq (%0), %%mm0\n"
18711 - " movntq %%mm0, (%1)\n"
18712 - " movq 8(%0), %%mm1\n"
18713 - " movntq %%mm1, 8(%1)\n"
18714 - " movq 16(%0), %%mm2\n"
18715 - " movntq %%mm2, 16(%1)\n"
18716 - " movq 24(%0), %%mm3\n"
18717 - " movntq %%mm3, 24(%1)\n"
18718 - " movq 32(%0), %%mm4\n"
18719 - " movntq %%mm4, 32(%1)\n"
18720 - " movq 40(%0), %%mm5\n"
18721 - " movntq %%mm5, 40(%1)\n"
18722 - " movq 48(%0), %%mm6\n"
18723 - " movntq %%mm6, 48(%1)\n"
18724 - " movq 56(%0), %%mm7\n"
18725 - " movntq %%mm7, 56(%1)\n"
18726 + "1: prefetch 320(%1)\n"
18727 + "2: movq (%1), %%mm0\n"
18728 + " movntq %%mm0, (%2)\n"
18729 + " movq 8(%1), %%mm1\n"
18730 + " movntq %%mm1, 8(%2)\n"
18731 + " movq 16(%1), %%mm2\n"
18732 + " movntq %%mm2, 16(%2)\n"
18733 + " movq 24(%1), %%mm3\n"
18734 + " movntq %%mm3, 24(%2)\n"
18735 + " movq 32(%1), %%mm4\n"
18736 + " movntq %%mm4, 32(%2)\n"
18737 + " movq 40(%1), %%mm5\n"
18738 + " movntq %%mm5, 40(%2)\n"
18739 + " movq 48(%1), %%mm6\n"
18740 + " movntq %%mm6, 48(%2)\n"
18741 + " movq 56(%1), %%mm7\n"
18742 + " movntq %%mm7, 56(%2)\n"
18743 ".section .fixup, \"ax\"\n"
18744 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18745 + "3:\n"
18746 +
18747 +#ifdef CONFIG_PAX_KERNEXEC
18748 + " movl %%cr0, %0\n"
18749 + " movl %0, %%eax\n"
18750 + " andl $0xFFFEFFFF, %%eax\n"
18751 + " movl %%eax, %%cr0\n"
18752 +#endif
18753 +
18754 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18755 +
18756 +#ifdef CONFIG_PAX_KERNEXEC
18757 + " movl %0, %%cr0\n"
18758 +#endif
18759 +
18760 " jmp 2b\n"
18761 ".previous\n"
18762 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18763 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18764
18765 from += 64;
18766 to += 64;
18767 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18768 static void fast_copy_page(void *to, void *from)
18769 {
18770 int i;
18771 + unsigned long cr0;
18772
18773 kernel_fpu_begin();
18774
18775 __asm__ __volatile__ (
18776 - "1: prefetch (%0)\n"
18777 - " prefetch 64(%0)\n"
18778 - " prefetch 128(%0)\n"
18779 - " prefetch 192(%0)\n"
18780 - " prefetch 256(%0)\n"
18781 + "1: prefetch (%1)\n"
18782 + " prefetch 64(%1)\n"
18783 + " prefetch 128(%1)\n"
18784 + " prefetch 192(%1)\n"
18785 + " prefetch 256(%1)\n"
18786 "2: \n"
18787 ".section .fixup, \"ax\"\n"
18788 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18789 + "3: \n"
18790 +
18791 +#ifdef CONFIG_PAX_KERNEXEC
18792 + " movl %%cr0, %0\n"
18793 + " movl %0, %%eax\n"
18794 + " andl $0xFFFEFFFF, %%eax\n"
18795 + " movl %%eax, %%cr0\n"
18796 +#endif
18797 +
18798 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18799 +
18800 +#ifdef CONFIG_PAX_KERNEXEC
18801 + " movl %0, %%cr0\n"
18802 +#endif
18803 +
18804 " jmp 2b\n"
18805 ".previous\n"
18806 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18807 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18808
18809 for (i = 0; i < 4096/64; i++) {
18810 __asm__ __volatile__ (
18811 - "1: prefetch 320(%0)\n"
18812 - "2: movq (%0), %%mm0\n"
18813 - " movq 8(%0), %%mm1\n"
18814 - " movq 16(%0), %%mm2\n"
18815 - " movq 24(%0), %%mm3\n"
18816 - " movq %%mm0, (%1)\n"
18817 - " movq %%mm1, 8(%1)\n"
18818 - " movq %%mm2, 16(%1)\n"
18819 - " movq %%mm3, 24(%1)\n"
18820 - " movq 32(%0), %%mm0\n"
18821 - " movq 40(%0), %%mm1\n"
18822 - " movq 48(%0), %%mm2\n"
18823 - " movq 56(%0), %%mm3\n"
18824 - " movq %%mm0, 32(%1)\n"
18825 - " movq %%mm1, 40(%1)\n"
18826 - " movq %%mm2, 48(%1)\n"
18827 - " movq %%mm3, 56(%1)\n"
18828 + "1: prefetch 320(%1)\n"
18829 + "2: movq (%1), %%mm0\n"
18830 + " movq 8(%1), %%mm1\n"
18831 + " movq 16(%1), %%mm2\n"
18832 + " movq 24(%1), %%mm3\n"
18833 + " movq %%mm0, (%2)\n"
18834 + " movq %%mm1, 8(%2)\n"
18835 + " movq %%mm2, 16(%2)\n"
18836 + " movq %%mm3, 24(%2)\n"
18837 + " movq 32(%1), %%mm0\n"
18838 + " movq 40(%1), %%mm1\n"
18839 + " movq 48(%1), %%mm2\n"
18840 + " movq 56(%1), %%mm3\n"
18841 + " movq %%mm0, 32(%2)\n"
18842 + " movq %%mm1, 40(%2)\n"
18843 + " movq %%mm2, 48(%2)\n"
18844 + " movq %%mm3, 56(%2)\n"
18845 ".section .fixup, \"ax\"\n"
18846 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18847 + "3:\n"
18848 +
18849 +#ifdef CONFIG_PAX_KERNEXEC
18850 + " movl %%cr0, %0\n"
18851 + " movl %0, %%eax\n"
18852 + " andl $0xFFFEFFFF, %%eax\n"
18853 + " movl %%eax, %%cr0\n"
18854 +#endif
18855 +
18856 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18857 +
18858 +#ifdef CONFIG_PAX_KERNEXEC
18859 + " movl %0, %%cr0\n"
18860 +#endif
18861 +
18862 " jmp 2b\n"
18863 ".previous\n"
18864 _ASM_EXTABLE(1b, 3b)
18865 - : : "r" (from), "r" (to) : "memory");
18866 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18867
18868 from += 64;
18869 to += 64;
18870 diff -urNp linux-2.6.32.41/arch/x86/lib/putuser.S linux-2.6.32.41/arch/x86/lib/putuser.S
18871 --- linux-2.6.32.41/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18872 +++ linux-2.6.32.41/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18873 @@ -15,7 +15,8 @@
18874 #include <asm/thread_info.h>
18875 #include <asm/errno.h>
18876 #include <asm/asm.h>
18877 -
18878 +#include <asm/segment.h>
18879 +#include <asm/pgtable.h>
18880
18881 /*
18882 * __put_user_X
18883 @@ -29,52 +30,119 @@
18884 * as they get called from within inline assembly.
18885 */
18886
18887 -#define ENTER CFI_STARTPROC ; \
18888 - GET_THREAD_INFO(%_ASM_BX)
18889 +#define ENTER CFI_STARTPROC
18890 #define EXIT ret ; \
18891 CFI_ENDPROC
18892
18893 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18894 +#define _DEST %_ASM_CX,%_ASM_BX
18895 +#else
18896 +#define _DEST %_ASM_CX
18897 +#endif
18898 +
18899 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18900 +#define __copyuser_seg gs;
18901 +#else
18902 +#define __copyuser_seg
18903 +#endif
18904 +
18905 .text
18906 ENTRY(__put_user_1)
18907 ENTER
18908 +
18909 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18910 + GET_THREAD_INFO(%_ASM_BX)
18911 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18912 jae bad_put_user
18913 -1: movb %al,(%_ASM_CX)
18914 +
18915 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18916 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18917 + cmp %_ASM_BX,%_ASM_CX
18918 + jb 1234f
18919 + xor %ebx,%ebx
18920 +1234:
18921 +#endif
18922 +
18923 +#endif
18924 +
18925 +1: __copyuser_seg movb %al,(_DEST)
18926 xor %eax,%eax
18927 EXIT
18928 ENDPROC(__put_user_1)
18929
18930 ENTRY(__put_user_2)
18931 ENTER
18932 +
18933 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18934 + GET_THREAD_INFO(%_ASM_BX)
18935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18936 sub $1,%_ASM_BX
18937 cmp %_ASM_BX,%_ASM_CX
18938 jae bad_put_user
18939 -2: movw %ax,(%_ASM_CX)
18940 +
18941 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18942 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18943 + cmp %_ASM_BX,%_ASM_CX
18944 + jb 1234f
18945 + xor %ebx,%ebx
18946 +1234:
18947 +#endif
18948 +
18949 +#endif
18950 +
18951 +2: __copyuser_seg movw %ax,(_DEST)
18952 xor %eax,%eax
18953 EXIT
18954 ENDPROC(__put_user_2)
18955
18956 ENTRY(__put_user_4)
18957 ENTER
18958 +
18959 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18960 + GET_THREAD_INFO(%_ASM_BX)
18961 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18962 sub $3,%_ASM_BX
18963 cmp %_ASM_BX,%_ASM_CX
18964 jae bad_put_user
18965 -3: movl %eax,(%_ASM_CX)
18966 +
18967 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18968 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18969 + cmp %_ASM_BX,%_ASM_CX
18970 + jb 1234f
18971 + xor %ebx,%ebx
18972 +1234:
18973 +#endif
18974 +
18975 +#endif
18976 +
18977 +3: __copyuser_seg movl %eax,(_DEST)
18978 xor %eax,%eax
18979 EXIT
18980 ENDPROC(__put_user_4)
18981
18982 ENTRY(__put_user_8)
18983 ENTER
18984 +
18985 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18986 + GET_THREAD_INFO(%_ASM_BX)
18987 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18988 sub $7,%_ASM_BX
18989 cmp %_ASM_BX,%_ASM_CX
18990 jae bad_put_user
18991 -4: mov %_ASM_AX,(%_ASM_CX)
18992 +
18993 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18994 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18995 + cmp %_ASM_BX,%_ASM_CX
18996 + jb 1234f
18997 + xor %ebx,%ebx
18998 +1234:
18999 +#endif
19000 +
19001 +#endif
19002 +
19003 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19004 #ifdef CONFIG_X86_32
19005 -5: movl %edx,4(%_ASM_CX)
19006 +5: __copyuser_seg movl %edx,4(_DEST)
19007 #endif
19008 xor %eax,%eax
19009 EXIT
19010 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_32.c linux-2.6.32.41/arch/x86/lib/usercopy_32.c
19011 --- linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19012 +++ linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19013 @@ -43,7 +43,7 @@ do { \
19014 __asm__ __volatile__( \
19015 " testl %1,%1\n" \
19016 " jz 2f\n" \
19017 - "0: lodsb\n" \
19018 + "0: "__copyuser_seg"lodsb\n" \
19019 " stosb\n" \
19020 " testb %%al,%%al\n" \
19021 " jz 1f\n" \
19022 @@ -128,10 +128,12 @@ do { \
19023 int __d0; \
19024 might_fault(); \
19025 __asm__ __volatile__( \
19026 + __COPYUSER_SET_ES \
19027 "0: rep; stosl\n" \
19028 " movl %2,%0\n" \
19029 "1: rep; stosb\n" \
19030 "2:\n" \
19031 + __COPYUSER_RESTORE_ES \
19032 ".section .fixup,\"ax\"\n" \
19033 "3: lea 0(%2,%0,4),%0\n" \
19034 " jmp 2b\n" \
19035 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19036 might_fault();
19037
19038 __asm__ __volatile__(
19039 + __COPYUSER_SET_ES
19040 " testl %0, %0\n"
19041 " jz 3f\n"
19042 " andl %0,%%ecx\n"
19043 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19044 " subl %%ecx,%0\n"
19045 " addl %0,%%eax\n"
19046 "1:\n"
19047 + __COPYUSER_RESTORE_ES
19048 ".section .fixup,\"ax\"\n"
19049 "2: xorl %%eax,%%eax\n"
19050 " jmp 1b\n"
19051 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19052
19053 #ifdef CONFIG_X86_INTEL_USERCOPY
19054 static unsigned long
19055 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19056 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19057 {
19058 int d0, d1;
19059 __asm__ __volatile__(
19060 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19061 " .align 2,0x90\n"
19062 "3: movl 0(%4), %%eax\n"
19063 "4: movl 4(%4), %%edx\n"
19064 - "5: movl %%eax, 0(%3)\n"
19065 - "6: movl %%edx, 4(%3)\n"
19066 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19067 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19068 "7: movl 8(%4), %%eax\n"
19069 "8: movl 12(%4),%%edx\n"
19070 - "9: movl %%eax, 8(%3)\n"
19071 - "10: movl %%edx, 12(%3)\n"
19072 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19073 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19074 "11: movl 16(%4), %%eax\n"
19075 "12: movl 20(%4), %%edx\n"
19076 - "13: movl %%eax, 16(%3)\n"
19077 - "14: movl %%edx, 20(%3)\n"
19078 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19079 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19080 "15: movl 24(%4), %%eax\n"
19081 "16: movl 28(%4), %%edx\n"
19082 - "17: movl %%eax, 24(%3)\n"
19083 - "18: movl %%edx, 28(%3)\n"
19084 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19085 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19086 "19: movl 32(%4), %%eax\n"
19087 "20: movl 36(%4), %%edx\n"
19088 - "21: movl %%eax, 32(%3)\n"
19089 - "22: movl %%edx, 36(%3)\n"
19090 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19091 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19092 "23: movl 40(%4), %%eax\n"
19093 "24: movl 44(%4), %%edx\n"
19094 - "25: movl %%eax, 40(%3)\n"
19095 - "26: movl %%edx, 44(%3)\n"
19096 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19097 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19098 "27: movl 48(%4), %%eax\n"
19099 "28: movl 52(%4), %%edx\n"
19100 - "29: movl %%eax, 48(%3)\n"
19101 - "30: movl %%edx, 52(%3)\n"
19102 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19103 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19104 "31: movl 56(%4), %%eax\n"
19105 "32: movl 60(%4), %%edx\n"
19106 - "33: movl %%eax, 56(%3)\n"
19107 - "34: movl %%edx, 60(%3)\n"
19108 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19109 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19110 " addl $-64, %0\n"
19111 " addl $64, %4\n"
19112 " addl $64, %3\n"
19113 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19114 " shrl $2, %0\n"
19115 " andl $3, %%eax\n"
19116 " cld\n"
19117 + __COPYUSER_SET_ES
19118 "99: rep; movsl\n"
19119 "36: movl %%eax, %0\n"
19120 "37: rep; movsb\n"
19121 "100:\n"
19122 + __COPYUSER_RESTORE_ES
19123 + ".section .fixup,\"ax\"\n"
19124 + "101: lea 0(%%eax,%0,4),%0\n"
19125 + " jmp 100b\n"
19126 + ".previous\n"
19127 + ".section __ex_table,\"a\"\n"
19128 + " .align 4\n"
19129 + " .long 1b,100b\n"
19130 + " .long 2b,100b\n"
19131 + " .long 3b,100b\n"
19132 + " .long 4b,100b\n"
19133 + " .long 5b,100b\n"
19134 + " .long 6b,100b\n"
19135 + " .long 7b,100b\n"
19136 + " .long 8b,100b\n"
19137 + " .long 9b,100b\n"
19138 + " .long 10b,100b\n"
19139 + " .long 11b,100b\n"
19140 + " .long 12b,100b\n"
19141 + " .long 13b,100b\n"
19142 + " .long 14b,100b\n"
19143 + " .long 15b,100b\n"
19144 + " .long 16b,100b\n"
19145 + " .long 17b,100b\n"
19146 + " .long 18b,100b\n"
19147 + " .long 19b,100b\n"
19148 + " .long 20b,100b\n"
19149 + " .long 21b,100b\n"
19150 + " .long 22b,100b\n"
19151 + " .long 23b,100b\n"
19152 + " .long 24b,100b\n"
19153 + " .long 25b,100b\n"
19154 + " .long 26b,100b\n"
19155 + " .long 27b,100b\n"
19156 + " .long 28b,100b\n"
19157 + " .long 29b,100b\n"
19158 + " .long 30b,100b\n"
19159 + " .long 31b,100b\n"
19160 + " .long 32b,100b\n"
19161 + " .long 33b,100b\n"
19162 + " .long 34b,100b\n"
19163 + " .long 35b,100b\n"
19164 + " .long 36b,100b\n"
19165 + " .long 37b,100b\n"
19166 + " .long 99b,101b\n"
19167 + ".previous"
19168 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19169 + : "1"(to), "2"(from), "0"(size)
19170 + : "eax", "edx", "memory");
19171 + return size;
19172 +}
19173 +
19174 +static unsigned long
19175 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19176 +{
19177 + int d0, d1;
19178 + __asm__ __volatile__(
19179 + " .align 2,0x90\n"
19180 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19181 + " cmpl $67, %0\n"
19182 + " jbe 3f\n"
19183 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19184 + " .align 2,0x90\n"
19185 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19186 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19187 + "5: movl %%eax, 0(%3)\n"
19188 + "6: movl %%edx, 4(%3)\n"
19189 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19190 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19191 + "9: movl %%eax, 8(%3)\n"
19192 + "10: movl %%edx, 12(%3)\n"
19193 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19194 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19195 + "13: movl %%eax, 16(%3)\n"
19196 + "14: movl %%edx, 20(%3)\n"
19197 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19198 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19199 + "17: movl %%eax, 24(%3)\n"
19200 + "18: movl %%edx, 28(%3)\n"
19201 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19202 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19203 + "21: movl %%eax, 32(%3)\n"
19204 + "22: movl %%edx, 36(%3)\n"
19205 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19206 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19207 + "25: movl %%eax, 40(%3)\n"
19208 + "26: movl %%edx, 44(%3)\n"
19209 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19210 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19211 + "29: movl %%eax, 48(%3)\n"
19212 + "30: movl %%edx, 52(%3)\n"
19213 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19214 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19215 + "33: movl %%eax, 56(%3)\n"
19216 + "34: movl %%edx, 60(%3)\n"
19217 + " addl $-64, %0\n"
19218 + " addl $64, %4\n"
19219 + " addl $64, %3\n"
19220 + " cmpl $63, %0\n"
19221 + " ja 1b\n"
19222 + "35: movl %0, %%eax\n"
19223 + " shrl $2, %0\n"
19224 + " andl $3, %%eax\n"
19225 + " cld\n"
19226 + "99: rep; "__copyuser_seg" movsl\n"
19227 + "36: movl %%eax, %0\n"
19228 + "37: rep; "__copyuser_seg" movsb\n"
19229 + "100:\n"
19230 ".section .fixup,\"ax\"\n"
19231 "101: lea 0(%%eax,%0,4),%0\n"
19232 " jmp 100b\n"
19233 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19234 int d0, d1;
19235 __asm__ __volatile__(
19236 " .align 2,0x90\n"
19237 - "0: movl 32(%4), %%eax\n"
19238 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19239 " cmpl $67, %0\n"
19240 " jbe 2f\n"
19241 - "1: movl 64(%4), %%eax\n"
19242 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19243 " .align 2,0x90\n"
19244 - "2: movl 0(%4), %%eax\n"
19245 - "21: movl 4(%4), %%edx\n"
19246 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19247 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19248 " movl %%eax, 0(%3)\n"
19249 " movl %%edx, 4(%3)\n"
19250 - "3: movl 8(%4), %%eax\n"
19251 - "31: movl 12(%4),%%edx\n"
19252 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19253 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19254 " movl %%eax, 8(%3)\n"
19255 " movl %%edx, 12(%3)\n"
19256 - "4: movl 16(%4), %%eax\n"
19257 - "41: movl 20(%4), %%edx\n"
19258 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19259 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19260 " movl %%eax, 16(%3)\n"
19261 " movl %%edx, 20(%3)\n"
19262 - "10: movl 24(%4), %%eax\n"
19263 - "51: movl 28(%4), %%edx\n"
19264 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19265 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19266 " movl %%eax, 24(%3)\n"
19267 " movl %%edx, 28(%3)\n"
19268 - "11: movl 32(%4), %%eax\n"
19269 - "61: movl 36(%4), %%edx\n"
19270 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19271 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19272 " movl %%eax, 32(%3)\n"
19273 " movl %%edx, 36(%3)\n"
19274 - "12: movl 40(%4), %%eax\n"
19275 - "71: movl 44(%4), %%edx\n"
19276 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19277 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19278 " movl %%eax, 40(%3)\n"
19279 " movl %%edx, 44(%3)\n"
19280 - "13: movl 48(%4), %%eax\n"
19281 - "81: movl 52(%4), %%edx\n"
19282 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19283 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19284 " movl %%eax, 48(%3)\n"
19285 " movl %%edx, 52(%3)\n"
19286 - "14: movl 56(%4), %%eax\n"
19287 - "91: movl 60(%4), %%edx\n"
19288 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19289 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19290 " movl %%eax, 56(%3)\n"
19291 " movl %%edx, 60(%3)\n"
19292 " addl $-64, %0\n"
19293 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19294 " shrl $2, %0\n"
19295 " andl $3, %%eax\n"
19296 " cld\n"
19297 - "6: rep; movsl\n"
19298 + "6: rep; "__copyuser_seg" movsl\n"
19299 " movl %%eax,%0\n"
19300 - "7: rep; movsb\n"
19301 + "7: rep; "__copyuser_seg" movsb\n"
19302 "8:\n"
19303 ".section .fixup,\"ax\"\n"
19304 "9: lea 0(%%eax,%0,4),%0\n"
19305 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19306
19307 __asm__ __volatile__(
19308 " .align 2,0x90\n"
19309 - "0: movl 32(%4), %%eax\n"
19310 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19311 " cmpl $67, %0\n"
19312 " jbe 2f\n"
19313 - "1: movl 64(%4), %%eax\n"
19314 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19315 " .align 2,0x90\n"
19316 - "2: movl 0(%4), %%eax\n"
19317 - "21: movl 4(%4), %%edx\n"
19318 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19319 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19320 " movnti %%eax, 0(%3)\n"
19321 " movnti %%edx, 4(%3)\n"
19322 - "3: movl 8(%4), %%eax\n"
19323 - "31: movl 12(%4),%%edx\n"
19324 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19325 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19326 " movnti %%eax, 8(%3)\n"
19327 " movnti %%edx, 12(%3)\n"
19328 - "4: movl 16(%4), %%eax\n"
19329 - "41: movl 20(%4), %%edx\n"
19330 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19331 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19332 " movnti %%eax, 16(%3)\n"
19333 " movnti %%edx, 20(%3)\n"
19334 - "10: movl 24(%4), %%eax\n"
19335 - "51: movl 28(%4), %%edx\n"
19336 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19337 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19338 " movnti %%eax, 24(%3)\n"
19339 " movnti %%edx, 28(%3)\n"
19340 - "11: movl 32(%4), %%eax\n"
19341 - "61: movl 36(%4), %%edx\n"
19342 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19343 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19344 " movnti %%eax, 32(%3)\n"
19345 " movnti %%edx, 36(%3)\n"
19346 - "12: movl 40(%4), %%eax\n"
19347 - "71: movl 44(%4), %%edx\n"
19348 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19349 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19350 " movnti %%eax, 40(%3)\n"
19351 " movnti %%edx, 44(%3)\n"
19352 - "13: movl 48(%4), %%eax\n"
19353 - "81: movl 52(%4), %%edx\n"
19354 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19355 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19356 " movnti %%eax, 48(%3)\n"
19357 " movnti %%edx, 52(%3)\n"
19358 - "14: movl 56(%4), %%eax\n"
19359 - "91: movl 60(%4), %%edx\n"
19360 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19361 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19362 " movnti %%eax, 56(%3)\n"
19363 " movnti %%edx, 60(%3)\n"
19364 " addl $-64, %0\n"
19365 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19366 " shrl $2, %0\n"
19367 " andl $3, %%eax\n"
19368 " cld\n"
19369 - "6: rep; movsl\n"
19370 + "6: rep; "__copyuser_seg" movsl\n"
19371 " movl %%eax,%0\n"
19372 - "7: rep; movsb\n"
19373 + "7: rep; "__copyuser_seg" movsb\n"
19374 "8:\n"
19375 ".section .fixup,\"ax\"\n"
19376 "9: lea 0(%%eax,%0,4),%0\n"
19377 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19378
19379 __asm__ __volatile__(
19380 " .align 2,0x90\n"
19381 - "0: movl 32(%4), %%eax\n"
19382 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19383 " cmpl $67, %0\n"
19384 " jbe 2f\n"
19385 - "1: movl 64(%4), %%eax\n"
19386 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19387 " .align 2,0x90\n"
19388 - "2: movl 0(%4), %%eax\n"
19389 - "21: movl 4(%4), %%edx\n"
19390 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19391 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19392 " movnti %%eax, 0(%3)\n"
19393 " movnti %%edx, 4(%3)\n"
19394 - "3: movl 8(%4), %%eax\n"
19395 - "31: movl 12(%4),%%edx\n"
19396 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19397 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19398 " movnti %%eax, 8(%3)\n"
19399 " movnti %%edx, 12(%3)\n"
19400 - "4: movl 16(%4), %%eax\n"
19401 - "41: movl 20(%4), %%edx\n"
19402 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19403 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19404 " movnti %%eax, 16(%3)\n"
19405 " movnti %%edx, 20(%3)\n"
19406 - "10: movl 24(%4), %%eax\n"
19407 - "51: movl 28(%4), %%edx\n"
19408 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19409 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19410 " movnti %%eax, 24(%3)\n"
19411 " movnti %%edx, 28(%3)\n"
19412 - "11: movl 32(%4), %%eax\n"
19413 - "61: movl 36(%4), %%edx\n"
19414 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19415 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19416 " movnti %%eax, 32(%3)\n"
19417 " movnti %%edx, 36(%3)\n"
19418 - "12: movl 40(%4), %%eax\n"
19419 - "71: movl 44(%4), %%edx\n"
19420 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19421 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19422 " movnti %%eax, 40(%3)\n"
19423 " movnti %%edx, 44(%3)\n"
19424 - "13: movl 48(%4), %%eax\n"
19425 - "81: movl 52(%4), %%edx\n"
19426 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19427 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19428 " movnti %%eax, 48(%3)\n"
19429 " movnti %%edx, 52(%3)\n"
19430 - "14: movl 56(%4), %%eax\n"
19431 - "91: movl 60(%4), %%edx\n"
19432 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19433 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19434 " movnti %%eax, 56(%3)\n"
19435 " movnti %%edx, 60(%3)\n"
19436 " addl $-64, %0\n"
19437 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19438 " shrl $2, %0\n"
19439 " andl $3, %%eax\n"
19440 " cld\n"
19441 - "6: rep; movsl\n"
19442 + "6: rep; "__copyuser_seg" movsl\n"
19443 " movl %%eax,%0\n"
19444 - "7: rep; movsb\n"
19445 + "7: rep; "__copyuser_seg" movsb\n"
19446 "8:\n"
19447 ".section .fixup,\"ax\"\n"
19448 "9: lea 0(%%eax,%0,4),%0\n"
19449 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19450 */
19451 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19452 unsigned long size);
19453 -unsigned long __copy_user_intel(void __user *to, const void *from,
19454 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19455 + unsigned long size);
19456 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19457 unsigned long size);
19458 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19459 const void __user *from, unsigned long size);
19460 #endif /* CONFIG_X86_INTEL_USERCOPY */
19461
19462 /* Generic arbitrary sized copy. */
19463 -#define __copy_user(to, from, size) \
19464 +#define __copy_user(to, from, size, prefix, set, restore) \
19465 do { \
19466 int __d0, __d1, __d2; \
19467 __asm__ __volatile__( \
19468 + set \
19469 " cmp $7,%0\n" \
19470 " jbe 1f\n" \
19471 " movl %1,%0\n" \
19472 " negl %0\n" \
19473 " andl $7,%0\n" \
19474 " subl %0,%3\n" \
19475 - "4: rep; movsb\n" \
19476 + "4: rep; "prefix"movsb\n" \
19477 " movl %3,%0\n" \
19478 " shrl $2,%0\n" \
19479 " andl $3,%3\n" \
19480 " .align 2,0x90\n" \
19481 - "0: rep; movsl\n" \
19482 + "0: rep; "prefix"movsl\n" \
19483 " movl %3,%0\n" \
19484 - "1: rep; movsb\n" \
19485 + "1: rep; "prefix"movsb\n" \
19486 "2:\n" \
19487 + restore \
19488 ".section .fixup,\"ax\"\n" \
19489 "5: addl %3,%0\n" \
19490 " jmp 2b\n" \
19491 @@ -682,14 +799,14 @@ do { \
19492 " negl %0\n" \
19493 " andl $7,%0\n" \
19494 " subl %0,%3\n" \
19495 - "4: rep; movsb\n" \
19496 + "4: rep; "__copyuser_seg"movsb\n" \
19497 " movl %3,%0\n" \
19498 " shrl $2,%0\n" \
19499 " andl $3,%3\n" \
19500 " .align 2,0x90\n" \
19501 - "0: rep; movsl\n" \
19502 + "0: rep; "__copyuser_seg"movsl\n" \
19503 " movl %3,%0\n" \
19504 - "1: rep; movsb\n" \
19505 + "1: rep; "__copyuser_seg"movsb\n" \
19506 "2:\n" \
19507 ".section .fixup,\"ax\"\n" \
19508 "5: addl %3,%0\n" \
19509 @@ -775,9 +892,9 @@ survive:
19510 }
19511 #endif
19512 if (movsl_is_ok(to, from, n))
19513 - __copy_user(to, from, n);
19514 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19515 else
19516 - n = __copy_user_intel(to, from, n);
19517 + n = __generic_copy_to_user_intel(to, from, n);
19518 return n;
19519 }
19520 EXPORT_SYMBOL(__copy_to_user_ll);
19521 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19522 unsigned long n)
19523 {
19524 if (movsl_is_ok(to, from, n))
19525 - __copy_user(to, from, n);
19526 + __copy_user(to, from, n, __copyuser_seg, "", "");
19527 else
19528 - n = __copy_user_intel((void __user *)to,
19529 - (const void *)from, n);
19530 + n = __generic_copy_from_user_intel(to, from, n);
19531 return n;
19532 }
19533 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19534 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19535 if (n > 64 && cpu_has_xmm2)
19536 n = __copy_user_intel_nocache(to, from, n);
19537 else
19538 - __copy_user(to, from, n);
19539 + __copy_user(to, from, n, __copyuser_seg, "", "");
19540 #else
19541 - __copy_user(to, from, n);
19542 + __copy_user(to, from, n, __copyuser_seg, "", "");
19543 #endif
19544 return n;
19545 }
19546 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19547
19548 -/**
19549 - * copy_to_user: - Copy a block of data into user space.
19550 - * @to: Destination address, in user space.
19551 - * @from: Source address, in kernel space.
19552 - * @n: Number of bytes to copy.
19553 - *
19554 - * Context: User context only. This function may sleep.
19555 - *
19556 - * Copy data from kernel space to user space.
19557 - *
19558 - * Returns number of bytes that could not be copied.
19559 - * On success, this will be zero.
19560 - */
19561 -unsigned long
19562 -copy_to_user(void __user *to, const void *from, unsigned long n)
19563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19564 +void __set_fs(mm_segment_t x)
19565 {
19566 - if (access_ok(VERIFY_WRITE, to, n))
19567 - n = __copy_to_user(to, from, n);
19568 - return n;
19569 + switch (x.seg) {
19570 + case 0:
19571 + loadsegment(gs, 0);
19572 + break;
19573 + case TASK_SIZE_MAX:
19574 + loadsegment(gs, __USER_DS);
19575 + break;
19576 + case -1UL:
19577 + loadsegment(gs, __KERNEL_DS);
19578 + break;
19579 + default:
19580 + BUG();
19581 + }
19582 + return;
19583 }
19584 -EXPORT_SYMBOL(copy_to_user);
19585 +EXPORT_SYMBOL(__set_fs);
19586
19587 -/**
19588 - * copy_from_user: - Copy a block of data from user space.
19589 - * @to: Destination address, in kernel space.
19590 - * @from: Source address, in user space.
19591 - * @n: Number of bytes to copy.
19592 - *
19593 - * Context: User context only. This function may sleep.
19594 - *
19595 - * Copy data from user space to kernel space.
19596 - *
19597 - * Returns number of bytes that could not be copied.
19598 - * On success, this will be zero.
19599 - *
19600 - * If some data could not be copied, this function will pad the copied
19601 - * data to the requested size using zero bytes.
19602 - */
19603 -unsigned long
19604 -copy_from_user(void *to, const void __user *from, unsigned long n)
19605 +void set_fs(mm_segment_t x)
19606 {
19607 - if (access_ok(VERIFY_READ, from, n))
19608 - n = __copy_from_user(to, from, n);
19609 - else
19610 - memset(to, 0, n);
19611 - return n;
19612 + current_thread_info()->addr_limit = x;
19613 + __set_fs(x);
19614 }
19615 -EXPORT_SYMBOL(copy_from_user);
19616 +EXPORT_SYMBOL(set_fs);
19617 +#endif
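
[Aside, not part of the patch: a rough userspace model of the copy strategy wrapped by the __copy_user() macro above — head bytes until alignment, the bulk as 32-bit words, then the tail byte-wise. It deliberately leaves out the .fixup/__ex_table fault handling and the UDEREF segment prefix/ES switching that the patch parameterizes, and it assumes the macro's "negl; andl $7" step aligns the destination operand, which the visible fragment does not show.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_like___copy_user(void *to, const void *from, size_t n)
{
    unsigned char *d = to;
    const unsigned char *s = from;

    if (n > 7) {                                  /* "cmp $7,%0; jbe 1f"              */
        size_t head = (0 - (uintptr_t)d) & 7;     /* "movl %1,%0; negl %0; andl $7,%0" */
        n -= head;
        while (head--)                            /* "4: rep; movsb" (head bytes)      */
            *d++ = *s++;
        for (size_t words = n >> 2; words; words--) {   /* "0: rep; movsl"             */
            uint32_t w;
            memcpy(&w, s, 4);
            memcpy(d, &w, 4);
            s += 4;
            d += 4;
        }
        n &= 3;
    }
    while (n--)                                   /* "1: rep; movsb" (tail bytes)      */
        *d++ = *s++;
}

int main(void)
{
    char src[] = "example payload for the copy sketch";
    char dst[sizeof(src)] = { 0 };
    copy_like___copy_user(dst, src, sizeof(src));
    puts(dst);
    return 0;
}
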
19618 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_64.c linux-2.6.32.41/arch/x86/lib/usercopy_64.c
19619 --- linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19620 +++ linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19621 @@ -42,6 +42,12 @@ long
19622 __strncpy_from_user(char *dst, const char __user *src, long count)
19623 {
19624 long res;
19625 +
19626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19627 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19628 + src += PAX_USER_SHADOW_BASE;
19629 +#endif
19630 +
19631 __do_strncpy_from_user(dst, src, count, res);
19632 return res;
19633 }
19634 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19635 {
19636 long __d0;
19637 might_fault();
19638 +
19639 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19640 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19641 + addr += PAX_USER_SHADOW_BASE;
19642 +#endif
19643 +
19644 /* no memory constraint because it doesn't change any memory gcc knows
19645 about */
19646 asm volatile(
19647 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19648
19649 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19650 {
19651 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19652 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19653 +
19654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19655 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19656 + to += PAX_USER_SHADOW_BASE;
19657 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19658 + from += PAX_USER_SHADOW_BASE;
19659 +#endif
19660 +
19661 return copy_user_generic((__force void *)to, (__force void *)from, len);
19662 - }
19663 - return len;
19664 + }
19665 + return len;
19666 }
19667 EXPORT_SYMBOL(copy_in_user);
19668
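
[Aside, not part of the patch: the UDEREF hunks above rebase a raw userland pointer into the shadow mapping before the kernel dereferences it, and pass already-rebased pointers through unchanged. A minimal sketch of that rewrite follows; SHADOW_BASE stands in for PAX_USER_SHADOW_BASE and its value here is a placeholder, not the real constant.]

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE 0x1000000000000ULL        /* placeholder for PAX_USER_SHADOW_BASE */

static uint64_t uderef_rebase(uint64_t uaddr)
{
    if (uaddr < SHADOW_BASE)                  /* raw userland pointer                 */
        uaddr += SHADOW_BASE;                 /* redirect into the shadow area        */
    return uaddr;                             /* already-shadowed pointers pass through */
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)uderef_rebase(0x7fffdeadb000ULL));
    return 0;
}
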
19669 diff -urNp linux-2.6.32.41/arch/x86/Makefile linux-2.6.32.41/arch/x86/Makefile
19670 --- linux-2.6.32.41/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19671 +++ linux-2.6.32.41/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19672 @@ -189,3 +189,12 @@ define archhelp
19673 echo ' FDARGS="..." arguments for the booted kernel'
19674 echo ' FDINITRD=file initrd for the booted kernel'
19675 endef
19676 +
19677 +define OLD_LD
19678 +
19679 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19680 +*** Please upgrade your binutils to 2.18 or newer
19681 +endef
19682 +
19683 +archprepare:
19684 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19685 diff -urNp linux-2.6.32.41/arch/x86/mm/extable.c linux-2.6.32.41/arch/x86/mm/extable.c
19686 --- linux-2.6.32.41/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19687 +++ linux-2.6.32.41/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19688 @@ -1,14 +1,71 @@
19689 #include <linux/module.h>
19690 #include <linux/spinlock.h>
19691 +#include <linux/sort.h>
19692 #include <asm/uaccess.h>
19693 +#include <asm/pgtable.h>
19694
19695 +/*
19696 + * The exception table needs to be sorted so that the binary
19697 + * search that we use to find entries in it works properly.
19698 + * This is used both for the kernel exception table and for
19699 + * the exception tables of modules that get loaded.
19700 + */
19701 +static int cmp_ex(const void *a, const void *b)
19702 +{
19703 + const struct exception_table_entry *x = a, *y = b;
19704 +
19705 + /* avoid overflow */
19706 + if (x->insn > y->insn)
19707 + return 1;
19708 + if (x->insn < y->insn)
19709 + return -1;
19710 + return 0;
19711 +}
19712 +
19713 +static void swap_ex(void *a, void *b, int size)
19714 +{
19715 + struct exception_table_entry t, *x = a, *y = b;
19716 +
19717 + t = *x;
19718 +
19719 + pax_open_kernel();
19720 + *x = *y;
19721 + *y = t;
19722 + pax_close_kernel();
19723 +}
19724 +
19725 +void sort_extable(struct exception_table_entry *start,
19726 + struct exception_table_entry *finish)
19727 +{
19728 + sort(start, finish - start, sizeof(struct exception_table_entry),
19729 + cmp_ex, swap_ex);
19730 +}
19731 +
19732 +#ifdef CONFIG_MODULES
19733 +/*
19734 + * If the exception table is sorted, any referring to the module init
19735 + * will be at the beginning or the end.
19736 + */
19737 +void trim_init_extable(struct module *m)
19738 +{
19739 + /*trim the beginning*/
19740 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19741 + m->extable++;
19742 + m->num_exentries--;
19743 + }
19744 + /*trim the end*/
19745 + while (m->num_exentries &&
19746 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19747 + m->num_exentries--;
19748 +}
19749 +#endif /* CONFIG_MODULES */
19750
19751 int fixup_exception(struct pt_regs *regs)
19752 {
19753 const struct exception_table_entry *fixup;
19754
19755 #ifdef CONFIG_PNPBIOS
19756 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19757 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19758 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19759 extern u32 pnp_bios_is_utter_crap;
19760 pnp_bios_is_utter_crap = 1;
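
[Aside, not part of the patch: as the comment added above says, the exception table must be sorted by faulting-instruction address because entries are found by binary search. A standalone sketch with qsort()/bsearch(); the kernel uses its own sort() helper and, in this patch, wraps the element swap in pax_open_kernel()/pax_close_kernel().]

#include <stdio.h>
#include <stdlib.h>

struct exception_table_entry { unsigned long insn, fixup; };

static int cmp_ex(const void *a, const void *b)
{
    const struct exception_table_entry *x = a, *y = b;
    return (x->insn > y->insn) - (x->insn < y->insn);   /* avoids subtraction overflow */
}

static int cmp_key(const void *key, const void *elt)
{
    unsigned long ip = *(const unsigned long *)key;
    const struct exception_table_entry *e = elt;
    return (ip > e->insn) - (ip < e->insn);
}

int main(void)
{
    struct exception_table_entry table[] = {
        { 0x3000, 0x3100 }, { 0x1000, 0x1100 }, { 0x2000, 0x2100 },
    };
    unsigned long faulting_ip = 0x2000;

    qsort(table, 3, sizeof(table[0]), cmp_ex);                 /* sort_extable()        */
    struct exception_table_entry *e =
        bsearch(&faulting_ip, table, 3, sizeof(table[0]), cmp_key);  /* fixup lookup    */
    printf("fixup for %#lx -> %#lx\n", faulting_ip, e ? e->fixup : 0UL);
    return 0;
}
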
19761 diff -urNp linux-2.6.32.41/arch/x86/mm/fault.c linux-2.6.32.41/arch/x86/mm/fault.c
19762 --- linux-2.6.32.41/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19763 +++ linux-2.6.32.41/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
19764 @@ -11,10 +11,19 @@
19765 #include <linux/kprobes.h> /* __kprobes, ... */
19766 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19767 #include <linux/perf_event.h> /* perf_sw_event */
19768 +#include <linux/unistd.h>
19769 +#include <linux/compiler.h>
19770
19771 #include <asm/traps.h> /* dotraplinkage, ... */
19772 #include <asm/pgalloc.h> /* pgd_*(), ... */
19773 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19774 +#include <asm/vsyscall.h>
19775 +#include <asm/tlbflush.h>
19776 +
19777 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19778 +#include <asm/stacktrace.h>
19779 +#include "../kernel/dumpstack.h"
19780 +#endif
19781
19782 /*
19783 * Page fault error code bits:
19784 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19785 int ret = 0;
19786
19787 /* kprobe_running() needs smp_processor_id() */
19788 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19789 + if (kprobes_built_in() && !user_mode(regs)) {
19790 preempt_disable();
19791 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19792 ret = 1;
19793 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19794 return !instr_lo || (instr_lo>>1) == 1;
19795 case 0x00:
19796 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19797 - if (probe_kernel_address(instr, opcode))
19798 + if (user_mode(regs)) {
19799 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19800 + return 0;
19801 + } else if (probe_kernel_address(instr, opcode))
19802 return 0;
19803
19804 *prefetch = (instr_lo == 0xF) &&
19805 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19806 while (instr < max_instr) {
19807 unsigned char opcode;
19808
19809 - if (probe_kernel_address(instr, opcode))
19810 + if (user_mode(regs)) {
19811 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19812 + break;
19813 + } else if (probe_kernel_address(instr, opcode))
19814 break;
19815
19816 instr++;
19817 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19818 force_sig_info(si_signo, &info, tsk);
19819 }
19820
19821 +#ifdef CONFIG_PAX_EMUTRAMP
19822 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19823 +#endif
19824 +
19825 +#ifdef CONFIG_PAX_PAGEEXEC
19826 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19827 +{
19828 + pgd_t *pgd;
19829 + pud_t *pud;
19830 + pmd_t *pmd;
19831 +
19832 + pgd = pgd_offset(mm, address);
19833 + if (!pgd_present(*pgd))
19834 + return NULL;
19835 + pud = pud_offset(pgd, address);
19836 + if (!pud_present(*pud))
19837 + return NULL;
19838 + pmd = pmd_offset(pud, address);
19839 + if (!pmd_present(*pmd))
19840 + return NULL;
19841 + return pmd;
19842 +}
19843 +#endif
19844 +
19845 DEFINE_SPINLOCK(pgd_lock);
19846 LIST_HEAD(pgd_list);
19847
19848 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19849 address += PMD_SIZE) {
19850
19851 unsigned long flags;
19852 +
19853 +#ifdef CONFIG_PAX_PER_CPU_PGD
19854 + unsigned long cpu;
19855 +#else
19856 struct page *page;
19857 +#endif
19858
19859 spin_lock_irqsave(&pgd_lock, flags);
19860 +
19861 +#ifdef CONFIG_PAX_PER_CPU_PGD
19862 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19863 + pgd_t *pgd = get_cpu_pgd(cpu);
19864 +#else
19865 list_for_each_entry(page, &pgd_list, lru) {
19866 - if (!vmalloc_sync_one(page_address(page), address))
19867 + pgd_t *pgd = page_address(page);
19868 +#endif
19869 +
19870 + if (!vmalloc_sync_one(pgd, address))
19871 break;
19872 }
19873 spin_unlock_irqrestore(&pgd_lock, flags);
19874 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19875 * an interrupt in the middle of a task switch..
19876 */
19877 pgd_paddr = read_cr3();
19878 +
19879 +#ifdef CONFIG_PAX_PER_CPU_PGD
19880 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19881 +#endif
19882 +
19883 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19884 if (!pmd_k)
19885 return -1;
19886 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19887
19888 const pgd_t *pgd_ref = pgd_offset_k(address);
19889 unsigned long flags;
19890 +
19891 +#ifdef CONFIG_PAX_PER_CPU_PGD
19892 + unsigned long cpu;
19893 +#else
19894 struct page *page;
19895 +#endif
19896
19897 if (pgd_none(*pgd_ref))
19898 continue;
19899
19900 spin_lock_irqsave(&pgd_lock, flags);
19901 +
19902 +#ifdef CONFIG_PAX_PER_CPU_PGD
19903 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19904 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19905 +#else
19906 list_for_each_entry(page, &pgd_list, lru) {
19907 pgd_t *pgd;
19908 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19909 +#endif
19910 +
19911 if (pgd_none(*pgd))
19912 set_pgd(pgd, *pgd_ref);
19913 else
19914 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19915 * happen within a race in page table update. In the later
19916 * case just flush:
19917 */
19918 +
19919 +#ifdef CONFIG_PAX_PER_CPU_PGD
19920 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19921 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19922 +#else
19923 pgd = pgd_offset(current->active_mm, address);
19924 +#endif
19925 +
19926 pgd_ref = pgd_offset_k(address);
19927 if (pgd_none(*pgd_ref))
19928 return -1;
19929 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
19930 static int is_errata100(struct pt_regs *regs, unsigned long address)
19931 {
19932 #ifdef CONFIG_X86_64
19933 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19934 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19935 return 1;
19936 #endif
19937 return 0;
19938 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
19939 }
19940
19941 static const char nx_warning[] = KERN_CRIT
19942 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19943 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19944
19945 static void
19946 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19947 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
19948 if (!oops_may_print())
19949 return;
19950
19951 - if (error_code & PF_INSTR) {
19952 + if (nx_enabled && (error_code & PF_INSTR)) {
19953 unsigned int level;
19954
19955 pte_t *pte = lookup_address(address, &level);
19956
19957 if (pte && pte_present(*pte) && !pte_exec(*pte))
19958 - printk(nx_warning, current_uid());
19959 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19960 }
19961
19962 +#ifdef CONFIG_PAX_KERNEXEC
19963 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19964 + if (current->signal->curr_ip)
19965 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19966 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19967 + else
19968 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19969 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19970 + }
19971 +#endif
19972 +
19973 printk(KERN_ALERT "BUG: unable to handle kernel ");
19974 if (address < PAGE_SIZE)
19975 printk(KERN_CONT "NULL pointer dereference");
19976 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
19977 unsigned long address, int si_code)
19978 {
19979 struct task_struct *tsk = current;
19980 + struct mm_struct *mm = tsk->mm;
19981 +
19982 +#ifdef CONFIG_X86_64
19983 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19984 + if (regs->ip == (unsigned long)vgettimeofday) {
19985 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
19986 + return;
19987 + } else if (regs->ip == (unsigned long)vtime) {
19988 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
19989 + return;
19990 + } else if (regs->ip == (unsigned long)vgetcpu) {
19991 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
19992 + return;
19993 + }
19994 + }
19995 +#endif
19996 +
19997 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19998 + if (mm && (error_code & PF_USER)) {
19999 + unsigned long ip = regs->ip;
20000 +
20001 + if (v8086_mode(regs))
20002 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20003 +
20004 + /*
20005 + * It's possible to have interrupts off here:
20006 + */
20007 + local_irq_enable();
20008 +
20009 +#ifdef CONFIG_PAX_PAGEEXEC
20010 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20011 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20012 +
20013 +#ifdef CONFIG_PAX_EMUTRAMP
20014 + switch (pax_handle_fetch_fault(regs)) {
20015 + case 2:
20016 + return;
20017 + }
20018 +#endif
20019 +
20020 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20021 + do_group_exit(SIGKILL);
20022 + }
20023 +#endif
20024 +
20025 +#ifdef CONFIG_PAX_SEGMEXEC
20026 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20027 +
20028 +#ifdef CONFIG_PAX_EMUTRAMP
20029 + switch (pax_handle_fetch_fault(regs)) {
20030 + case 2:
20031 + return;
20032 + }
20033 +#endif
20034 +
20035 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20036 + do_group_exit(SIGKILL);
20037 + }
20038 +#endif
20039 +
20040 + }
20041 +#endif
20042
20043 /* User mode accesses just cause a SIGSEGV */
20044 if (error_code & PF_USER) {
20045 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20046 return 1;
20047 }
20048
20049 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20050 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20051 +{
20052 + pte_t *pte;
20053 + pmd_t *pmd;
20054 + spinlock_t *ptl;
20055 + unsigned char pte_mask;
20056 +
20057 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20058 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20059 + return 0;
20060 +
20061 + /* PaX: it's our fault, let's handle it if we can */
20062 +
20063 + /* PaX: take a look at read faults before acquiring any locks */
20064 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20065 + /* instruction fetch attempt from a protected page in user mode */
20066 + up_read(&mm->mmap_sem);
20067 +
20068 +#ifdef CONFIG_PAX_EMUTRAMP
20069 + switch (pax_handle_fetch_fault(regs)) {
20070 + case 2:
20071 + return 1;
20072 + }
20073 +#endif
20074 +
20075 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20076 + do_group_exit(SIGKILL);
20077 + }
20078 +
20079 + pmd = pax_get_pmd(mm, address);
20080 + if (unlikely(!pmd))
20081 + return 0;
20082 +
20083 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20084 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20085 + pte_unmap_unlock(pte, ptl);
20086 + return 0;
20087 + }
20088 +
20089 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20090 + /* write attempt to a protected page in user mode */
20091 + pte_unmap_unlock(pte, ptl);
20092 + return 0;
20093 + }
20094 +
20095 +#ifdef CONFIG_SMP
20096 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20097 +#else
20098 + if (likely(address > get_limit(regs->cs)))
20099 +#endif
20100 + {
20101 + set_pte(pte, pte_mkread(*pte));
20102 + __flush_tlb_one(address);
20103 + pte_unmap_unlock(pte, ptl);
20104 + up_read(&mm->mmap_sem);
20105 + return 1;
20106 + }
20107 +
20108 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20109 +
20110 + /*
20111 + * PaX: fill DTLB with user rights and retry
20112 + */
20113 + __asm__ __volatile__ (
20114 + "orb %2,(%1)\n"
20115 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20116 +/*
20117 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20118 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20119 + * page fault when examined during a TLB load attempt. this is true not only
20120 + * for PTEs holding a non-present entry but also present entries that will
20121 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20122 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20123 + * for our target pages since their PTEs are simply not in the TLBs at all.
20124 +
20125 + * the best thing in omitting it is that we gain around 15-20% speed in the
20126 + * fast path of the page fault handler and can get rid of tracing since we
20127 + * can no longer flush unintended entries.
20128 + */
20129 + "invlpg (%0)\n"
20130 +#endif
20131 + __copyuser_seg"testb $0,(%0)\n"
20132 + "xorb %3,(%1)\n"
20133 + :
20134 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20135 + : "memory", "cc");
20136 + pte_unmap_unlock(pte, ptl);
20137 + up_read(&mm->mmap_sem);
20138 + return 1;
20139 +}
20140 +#endif
20141 +
20142 /*
20143 * Handle a spurious fault caused by a stale TLB entry.
20144 *
20145 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20146 static inline int
20147 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20148 {
20149 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20150 + return 1;
20151 +
20152 if (write) {
20153 /* write, present and write, not present: */
20154 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20155 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20156 {
20157 struct vm_area_struct *vma;
20158 struct task_struct *tsk;
20159 - unsigned long address;
20160 struct mm_struct *mm;
20161 int write;
20162 int fault;
20163
20164 + /* Get the faulting address: */
20165 + unsigned long address = read_cr2();
20166 +
20167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20168 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20169 + if (!search_exception_tables(regs->ip)) {
20170 + bad_area_nosemaphore(regs, error_code, address);
20171 + return;
20172 + }
20173 + if (address < PAX_USER_SHADOW_BASE) {
20174 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20175 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20176 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20177 + } else
20178 + address -= PAX_USER_SHADOW_BASE;
20179 + }
20180 +#endif
20181 +
20182 tsk = current;
20183 mm = tsk->mm;
20184
20185 - /* Get the faulting address: */
20186 - address = read_cr2();
20187 -
20188 /*
20189 * Detect and handle instructions that would cause a page fault for
20190 * both a tracked kernel page and a userspace page.
20191 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20192 * User-mode registers count as a user access even for any
20193 * potential system fault or CPU buglet:
20194 */
20195 - if (user_mode_vm(regs)) {
20196 + if (user_mode(regs)) {
20197 local_irq_enable();
20198 error_code |= PF_USER;
20199 } else {
20200 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20201 might_sleep();
20202 }
20203
20204 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20205 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20206 + return;
20207 +#endif
20208 +
20209 vma = find_vma(mm, address);
20210 if (unlikely(!vma)) {
20211 bad_area(regs, error_code, address);
20212 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20213 bad_area(regs, error_code, address);
20214 return;
20215 }
20216 - if (error_code & PF_USER) {
20217 - /*
20218 - * Accessing the stack below %sp is always a bug.
20219 - * The large cushion allows instructions like enter
20220 - * and pusha to work. ("enter $65535, $31" pushes
20221 - * 32 pointers and then decrements %sp by 65535.)
20222 - */
20223 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20224 - bad_area(regs, error_code, address);
20225 - return;
20226 - }
20227 + /*
20228 + * Accessing the stack below %sp is always a bug.
20229 + * The large cushion allows instructions like enter
20230 + * and pusha to work. ("enter $65535, $31" pushes
20231 + * 32 pointers and then decrements %sp by 65535.)
20232 + */
20233 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20234 + bad_area(regs, error_code, address);
20235 + return;
20236 + }
20237 +
20238 +#ifdef CONFIG_PAX_SEGMEXEC
20239 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20240 + bad_area(regs, error_code, address);
20241 + return;
20242 }
20243 +#endif
20244 +
20245 if (unlikely(expand_stack(vma, address))) {
20246 bad_area(regs, error_code, address);
20247 return;
20248 @@ -1146,3 +1416,199 @@ good_area:
20249
20250 up_read(&mm->mmap_sem);
20251 }
20252 +
20253 +#ifdef CONFIG_PAX_EMUTRAMP
20254 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20255 +{
20256 + int err;
20257 +
20258 + do { /* PaX: gcc trampoline emulation #1 */
20259 + unsigned char mov1, mov2;
20260 + unsigned short jmp;
20261 + unsigned int addr1, addr2;
20262 +
20263 +#ifdef CONFIG_X86_64
20264 + if ((regs->ip + 11) >> 32)
20265 + break;
20266 +#endif
20267 +
20268 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20269 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20270 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20271 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20272 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20273 +
20274 + if (err)
20275 + break;
20276 +
20277 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20278 + regs->cx = addr1;
20279 + regs->ax = addr2;
20280 + regs->ip = addr2;
20281 + return 2;
20282 + }
20283 + } while (0);
20284 +
20285 + do { /* PaX: gcc trampoline emulation #2 */
20286 + unsigned char mov, jmp;
20287 + unsigned int addr1, addr2;
20288 +
20289 +#ifdef CONFIG_X86_64
20290 + if ((regs->ip + 9) >> 32)
20291 + break;
20292 +#endif
20293 +
20294 + err = get_user(mov, (unsigned char __user *)regs->ip);
20295 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20296 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20297 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20298 +
20299 + if (err)
20300 + break;
20301 +
20302 + if (mov == 0xB9 && jmp == 0xE9) {
20303 + regs->cx = addr1;
20304 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20305 + return 2;
20306 + }
20307 + } while (0);
20308 +
20309 + return 1; /* PaX in action */
20310 +}
20311 +
20312 +#ifdef CONFIG_X86_64
20313 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20314 +{
20315 + int err;
20316 +
20317 + do { /* PaX: gcc trampoline emulation #1 */
20318 + unsigned short mov1, mov2, jmp1;
20319 + unsigned char jmp2;
20320 + unsigned int addr1;
20321 + unsigned long addr2;
20322 +
20323 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20324 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20325 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20326 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20327 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20328 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20329 +
20330 + if (err)
20331 + break;
20332 +
20333 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20334 + regs->r11 = addr1;
20335 + regs->r10 = addr2;
20336 + regs->ip = addr1;
20337 + return 2;
20338 + }
20339 + } while (0);
20340 +
20341 + do { /* PaX: gcc trampoline emulation #2 */
20342 + unsigned short mov1, mov2, jmp1;
20343 + unsigned char jmp2;
20344 + unsigned long addr1, addr2;
20345 +
20346 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20347 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20348 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20349 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20350 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20351 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20352 +
20353 + if (err)
20354 + break;
20355 +
20356 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20357 + regs->r11 = addr1;
20358 + regs->r10 = addr2;
20359 + regs->ip = addr1;
20360 + return 2;
20361 + }
20362 + } while (0);
20363 +
20364 + return 1; /* PaX in action */
20365 +}
20366 +#endif
20367 +
20368 +/*
20369 + * PaX: decide what to do with offenders (regs->ip = fault address)
20370 + *
20371 + * returns 1 when task should be killed
20372 + * 2 when gcc trampoline was detected
20373 + */
20374 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20375 +{
20376 + if (v8086_mode(regs))
20377 + return 1;
20378 +
20379 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20380 + return 1;
20381 +
20382 +#ifdef CONFIG_X86_32
20383 + return pax_handle_fetch_fault_32(regs);
20384 +#else
20385 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20386 + return pax_handle_fetch_fault_32(regs);
20387 + else
20388 + return pax_handle_fetch_fault_64(regs);
20389 +#endif
20390 +}
20391 +#endif
20392 +
20393 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20394 +void pax_report_insns(void *pc, void *sp)
20395 +{
20396 + long i;
20397 +
20398 + printk(KERN_ERR "PAX: bytes at PC: ");
20399 + for (i = 0; i < 20; i++) {
20400 + unsigned char c;
20401 + if (get_user(c, (__force unsigned char __user *)pc+i))
20402 + printk(KERN_CONT "?? ");
20403 + else
20404 + printk(KERN_CONT "%02x ", c);
20405 + }
20406 + printk("\n");
20407 +
20408 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20409 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20410 + unsigned long c;
20411 + if (get_user(c, (__force unsigned long __user *)sp+i))
20412 +#ifdef CONFIG_X86_32
20413 + printk(KERN_CONT "???????? ");
20414 +#else
20415 + printk(KERN_CONT "???????????????? ");
20416 +#endif
20417 + else
20418 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20419 + }
20420 + printk("\n");
20421 +}
20422 +#endif
20423 +
20424 +/**
20425 + * probe_kernel_write(): safely attempt to write to a location
20426 + * @dst: address to write to
20427 + * @src: pointer to the data that shall be written
20428 + * @size: size of the data chunk
20429 + *
20430 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20431 + * happens, handle that and return -EFAULT.
20432 + */
20433 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20434 +{
20435 + long ret;
20436 + mm_segment_t old_fs = get_fs();
20437 +
20438 + set_fs(KERNEL_DS);
20439 + pagefault_disable();
20440 + pax_open_kernel();
20441 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20442 + pax_close_kernel();
20443 + pagefault_enable();
20444 + set_fs(old_fs);
20445 +
20446 + return ret ? -EFAULT : 0;
20447 +}
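
[Aside, not part of the patch: the first 32-bit pattern that pax_handle_fetch_fault_32() recognizes is the gcc nested-function trampoline "b9 imm32 / b8 imm32 / ff e0" (mov $addr1,%ecx; mov $addr2,%eax; jmp *%eax). A userspace sketch of that byte-pattern match follows; instead of patching pt_regs it just reports what the emulation would load. Little-endian host assumed, as on x86; the second pattern (b9/e9) is handled analogously and not shown.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int match_trampoline1(const unsigned char *ip, uint32_t *cx, uint32_t *ax)
{
    if (ip[0] != 0xB9 || ip[5] != 0xB8 || ip[10] != 0xFF || ip[11] != 0xE0)
        return 0;
    memcpy(cx, ip + 1, 4);     /* addr1: would be written to regs->cx              */
    memcpy(ax, ip + 6, 4);     /* addr2: would become regs->ax and the new regs->ip */
    return 1;
}

int main(void)
{
    const unsigned char tramp[] = {
        0xB9, 0x78, 0x56, 0x34, 0x12,   /* mov $0x12345678,%ecx */
        0xB8, 0xF0, 0xDE, 0xBC, 0x9A,   /* mov $0x9abcdef0,%eax */
        0xFF, 0xE0                      /* jmp *%eax            */
    };
    uint32_t cx, ax;

    if (match_trampoline1(tramp, &cx, &ax))
        printf("trampoline: cx=%#x, resume at %#x\n", cx, ax);
    return 0;
}
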
20448 diff -urNp linux-2.6.32.41/arch/x86/mm/gup.c linux-2.6.32.41/arch/x86/mm/gup.c
20449 --- linux-2.6.32.41/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20450 +++ linux-2.6.32.41/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20451 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20452 addr = start;
20453 len = (unsigned long) nr_pages << PAGE_SHIFT;
20454 end = start + len;
20455 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20456 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20457 (void __user *)start, len)))
20458 return 0;
20459
20460 diff -urNp linux-2.6.32.41/arch/x86/mm/highmem_32.c linux-2.6.32.41/arch/x86/mm/highmem_32.c
20461 --- linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20462 +++ linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20463 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20464 idx = type + KM_TYPE_NR*smp_processor_id();
20465 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20466 BUG_ON(!pte_none(*(kmap_pte-idx)));
20467 +
20468 + pax_open_kernel();
20469 set_pte(kmap_pte-idx, mk_pte(page, prot));
20470 + pax_close_kernel();
20471
20472 return (void *)vaddr;
20473 }
20474 diff -urNp linux-2.6.32.41/arch/x86/mm/hugetlbpage.c linux-2.6.32.41/arch/x86/mm/hugetlbpage.c
20475 --- linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20476 +++ linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20477 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20478 struct hstate *h = hstate_file(file);
20479 struct mm_struct *mm = current->mm;
20480 struct vm_area_struct *vma;
20481 - unsigned long start_addr;
20482 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20483 +
20484 +#ifdef CONFIG_PAX_SEGMEXEC
20485 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20486 + pax_task_size = SEGMEXEC_TASK_SIZE;
20487 +#endif
20488 +
20489 + pax_task_size -= PAGE_SIZE;
20490
20491 if (len > mm->cached_hole_size) {
20492 - start_addr = mm->free_area_cache;
20493 + start_addr = mm->free_area_cache;
20494 } else {
20495 - start_addr = TASK_UNMAPPED_BASE;
20496 - mm->cached_hole_size = 0;
20497 + start_addr = mm->mmap_base;
20498 + mm->cached_hole_size = 0;
20499 }
20500
20501 full_search:
20502 @@ -281,26 +288,27 @@ full_search:
20503
20504 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20505 /* At this point: (!vma || addr < vma->vm_end). */
20506 - if (TASK_SIZE - len < addr) {
20507 + if (pax_task_size - len < addr) {
20508 /*
20509 * Start a new search - just in case we missed
20510 * some holes.
20511 */
20512 - if (start_addr != TASK_UNMAPPED_BASE) {
20513 - start_addr = TASK_UNMAPPED_BASE;
20514 + if (start_addr != mm->mmap_base) {
20515 + start_addr = mm->mmap_base;
20516 mm->cached_hole_size = 0;
20517 goto full_search;
20518 }
20519 return -ENOMEM;
20520 }
20521 - if (!vma || addr + len <= vma->vm_start) {
20522 - mm->free_area_cache = addr + len;
20523 - return addr;
20524 - }
20525 + if (check_heap_stack_gap(vma, addr, len))
20526 + break;
20527 if (addr + mm->cached_hole_size < vma->vm_start)
20528 mm->cached_hole_size = vma->vm_start - addr;
20529 addr = ALIGN(vma->vm_end, huge_page_size(h));
20530 }
20531 +
20532 + mm->free_area_cache = addr + len;
20533 + return addr;
20534 }
20535
20536 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20537 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20538 {
20539 struct hstate *h = hstate_file(file);
20540 struct mm_struct *mm = current->mm;
20541 - struct vm_area_struct *vma, *prev_vma;
20542 - unsigned long base = mm->mmap_base, addr = addr0;
20543 + struct vm_area_struct *vma;
20544 + unsigned long base = mm->mmap_base, addr;
20545 unsigned long largest_hole = mm->cached_hole_size;
20546 - int first_time = 1;
20547
20548 /* don't allow allocations above current base */
20549 if (mm->free_area_cache > base)
20550 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20551 largest_hole = 0;
20552 mm->free_area_cache = base;
20553 }
20554 -try_again:
20555 +
20556 /* make sure it can fit in the remaining address space */
20557 if (mm->free_area_cache < len)
20558 goto fail;
20559
20560 /* either no address requested or cant fit in requested address hole */
20561 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20562 + addr = (mm->free_area_cache - len);
20563 do {
20564 + addr &= huge_page_mask(h);
20565 + vma = find_vma(mm, addr);
20566 /*
20567 * Lookup failure means no vma is above this address,
20568 * i.e. return with success:
20569 - */
20570 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20571 - return addr;
20572 -
20573 - /*
20574 * new region fits between prev_vma->vm_end and
20575 * vma->vm_start, use it:
20576 */
20577 - if (addr + len <= vma->vm_start &&
20578 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20579 + if (check_heap_stack_gap(vma, addr, len)) {
20580 /* remember the address as a hint for next time */
20581 - mm->cached_hole_size = largest_hole;
20582 - return (mm->free_area_cache = addr);
20583 - } else {
20584 - /* pull free_area_cache down to the first hole */
20585 - if (mm->free_area_cache == vma->vm_end) {
20586 - mm->free_area_cache = vma->vm_start;
20587 - mm->cached_hole_size = largest_hole;
20588 - }
20589 + mm->cached_hole_size = largest_hole;
20590 + return (mm->free_area_cache = addr);
20591 + }
20592 + /* pull free_area_cache down to the first hole */
20593 + if (mm->free_area_cache == vma->vm_end) {
20594 + mm->free_area_cache = vma->vm_start;
20595 + mm->cached_hole_size = largest_hole;
20596 }
20597
20598 /* remember the largest hole we saw so far */
20599 if (addr + largest_hole < vma->vm_start)
20600 - largest_hole = vma->vm_start - addr;
20601 + largest_hole = vma->vm_start - addr;
20602
20603 /* try just below the current vma->vm_start */
20604 - addr = (vma->vm_start - len) & huge_page_mask(h);
20605 - } while (len <= vma->vm_start);
20606 + addr = skip_heap_stack_gap(vma, len);
20607 + } while (!IS_ERR_VALUE(addr));
20608
20609 fail:
20610 /*
20611 - * if hint left us with no space for the requested
20612 - * mapping then try again:
20613 - */
20614 - if (first_time) {
20615 - mm->free_area_cache = base;
20616 - largest_hole = 0;
20617 - first_time = 0;
20618 - goto try_again;
20619 - }
20620 - /*
20621 * A failed mmap() very likely causes application failure,
20622 * so fall back to the bottom-up function here. This scenario
20623 * can happen with large stack limits and large mmap()
20624 * allocations.
20625 */
20626 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20627 +
20628 +#ifdef CONFIG_PAX_SEGMEXEC
20629 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20630 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20631 + else
20632 +#endif
20633 +
20634 + mm->mmap_base = TASK_UNMAPPED_BASE;
20635 +
20636 +#ifdef CONFIG_PAX_RANDMMAP
20637 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20638 + mm->mmap_base += mm->delta_mmap;
20639 +#endif
20640 +
20641 + mm->free_area_cache = mm->mmap_base;
20642 mm->cached_hole_size = ~0UL;
20643 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20644 len, pgoff, flags);
20645 @@ -387,6 +393,7 @@ fail:
20646 /*
20647 * Restore the topdown base:
20648 */
20649 + mm->mmap_base = base;
20650 mm->free_area_cache = base;
20651 mm->cached_hole_size = ~0UL;
20652
20653 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20654 struct hstate *h = hstate_file(file);
20655 struct mm_struct *mm = current->mm;
20656 struct vm_area_struct *vma;
20657 + unsigned long pax_task_size = TASK_SIZE;
20658
20659 if (len & ~huge_page_mask(h))
20660 return -EINVAL;
20661 - if (len > TASK_SIZE)
20662 +
20663 +#ifdef CONFIG_PAX_SEGMEXEC
20664 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20665 + pax_task_size = SEGMEXEC_TASK_SIZE;
20666 +#endif
20667 +
20668 + pax_task_size -= PAGE_SIZE;
20669 +
20670 + if (len > pax_task_size)
20671 return -ENOMEM;
20672
20673 if (flags & MAP_FIXED) {
20674 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20675 if (addr) {
20676 addr = ALIGN(addr, huge_page_size(h));
20677 vma = find_vma(mm, addr);
20678 - if (TASK_SIZE - len >= addr &&
20679 - (!vma || addr + len <= vma->vm_start))
20680 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20681 return addr;
20682 }
20683 if (mm->get_unmapped_area == arch_get_unmapped_area)
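
[Aside, not part of the patch: the hunks above replace the open-coded "addr + len <= vma->vm_start" tests with check_heap_stack_gap(). A hedged sketch of what such a gap check can look like follows; the guard-gap size and the exact semantics of check_heap_stack_gap() are assumptions here, not taken from the patch.]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long start, end; bool grows_down; };

#define STACK_GUARD_GAP (256UL * 4096)     /* placeholder guard size */

static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
    if (!vma)
        return true;                       /* nothing above: the hole runs to the limit */
    if (vma->grows_down)                   /* keep a guard gap below a stack VMA        */
        return addr + len + STACK_GUARD_GAP <= vma->start;
    return addr + len <= vma->start;
}

int main(void)
{
    struct vma stack = { 0xBF000000UL, 0xC0000000UL, true };

    printf("%d %d\n",
           gap_ok(&stack, 0xBE000000UL, 0x100000UL),    /* 1: leaves the guard gap   */
           gap_ok(&stack, 0xBEF00000UL, 0x100000UL));   /* 0: would touch the guard  */
    return 0;
}
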
20684 diff -urNp linux-2.6.32.41/arch/x86/mm/init_32.c linux-2.6.32.41/arch/x86/mm/init_32.c
20685 --- linux-2.6.32.41/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20686 +++ linux-2.6.32.41/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20687 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20688 }
20689
20690 /*
20691 - * Creates a middle page table and puts a pointer to it in the
20692 - * given global directory entry. This only returns the gd entry
20693 - * in non-PAE compilation mode, since the middle layer is folded.
20694 - */
20695 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20696 -{
20697 - pud_t *pud;
20698 - pmd_t *pmd_table;
20699 -
20700 -#ifdef CONFIG_X86_PAE
20701 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20702 - if (after_bootmem)
20703 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20704 - else
20705 - pmd_table = (pmd_t *)alloc_low_page();
20706 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20707 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20708 - pud = pud_offset(pgd, 0);
20709 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20710 -
20711 - return pmd_table;
20712 - }
20713 -#endif
20714 - pud = pud_offset(pgd, 0);
20715 - pmd_table = pmd_offset(pud, 0);
20716 -
20717 - return pmd_table;
20718 -}
20719 -
20720 -/*
20721 * Create a page table and place a pointer to it in a middle page
20722 * directory entry:
20723 */
20724 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20725 page_table = (pte_t *)alloc_low_page();
20726
20727 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20728 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20729 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20730 +#else
20731 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20732 +#endif
20733 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20734 }
20735
20736 return pte_offset_kernel(pmd, 0);
20737 }
20738
20739 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20740 +{
20741 + pud_t *pud;
20742 + pmd_t *pmd_table;
20743 +
20744 + pud = pud_offset(pgd, 0);
20745 + pmd_table = pmd_offset(pud, 0);
20746 +
20747 + return pmd_table;
20748 +}
20749 +
20750 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20751 {
20752 int pgd_idx = pgd_index(vaddr);
20753 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20754 int pgd_idx, pmd_idx;
20755 unsigned long vaddr;
20756 pgd_t *pgd;
20757 + pud_t *pud;
20758 pmd_t *pmd;
20759 pte_t *pte = NULL;
20760
20761 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20762 pgd = pgd_base + pgd_idx;
20763
20764 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20765 - pmd = one_md_table_init(pgd);
20766 - pmd = pmd + pmd_index(vaddr);
20767 + pud = pud_offset(pgd, vaddr);
20768 + pmd = pmd_offset(pud, vaddr);
20769 +
20770 +#ifdef CONFIG_X86_PAE
20771 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20772 +#endif
20773 +
20774 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20775 pmd++, pmd_idx++) {
20776 pte = page_table_kmap_check(one_page_table_init(pmd),
20777 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20778 }
20779 }
20780
20781 -static inline int is_kernel_text(unsigned long addr)
20782 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20783 {
20784 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20785 - return 1;
20786 - return 0;
20787 + if ((start > ktla_ktva((unsigned long)_etext) ||
20788 + end <= ktla_ktva((unsigned long)_stext)) &&
20789 + (start > ktla_ktva((unsigned long)_einittext) ||
20790 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20791 +
20792 +#ifdef CONFIG_ACPI_SLEEP
20793 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20794 +#endif
20795 +
20796 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20797 + return 0;
20798 + return 1;
20799 }
20800
20801 /*
20802 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20803 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20804 unsigned long start_pfn, end_pfn;
20805 pgd_t *pgd_base = swapper_pg_dir;
20806 - int pgd_idx, pmd_idx, pte_ofs;
20807 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20808 unsigned long pfn;
20809 pgd_t *pgd;
20810 + pud_t *pud;
20811 pmd_t *pmd;
20812 pte_t *pte;
20813 unsigned pages_2m, pages_4k;
20814 @@ -278,8 +279,13 @@ repeat:
20815 pfn = start_pfn;
20816 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20817 pgd = pgd_base + pgd_idx;
20818 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20819 - pmd = one_md_table_init(pgd);
20820 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20821 + pud = pud_offset(pgd, 0);
20822 + pmd = pmd_offset(pud, 0);
20823 +
20824 +#ifdef CONFIG_X86_PAE
20825 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20826 +#endif
20827
20828 if (pfn >= end_pfn)
20829 continue;
20830 @@ -291,14 +297,13 @@ repeat:
20831 #endif
20832 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20833 pmd++, pmd_idx++) {
20834 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20835 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20836
20837 /*
20838 * Map with big pages if possible, otherwise
20839 * create normal page tables:
20840 */
20841 if (use_pse) {
20842 - unsigned int addr2;
20843 pgprot_t prot = PAGE_KERNEL_LARGE;
20844 /*
20845 * first pass will use the same initial
20846 @@ -308,11 +313,7 @@ repeat:
20847 __pgprot(PTE_IDENT_ATTR |
20848 _PAGE_PSE);
20849
20850 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20851 - PAGE_OFFSET + PAGE_SIZE-1;
20852 -
20853 - if (is_kernel_text(addr) ||
20854 - is_kernel_text(addr2))
20855 + if (is_kernel_text(address, address + PMD_SIZE))
20856 prot = PAGE_KERNEL_LARGE_EXEC;
20857
20858 pages_2m++;
20859 @@ -329,7 +330,7 @@ repeat:
20860 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20861 pte += pte_ofs;
20862 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20863 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20864 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20865 pgprot_t prot = PAGE_KERNEL;
20866 /*
20867 * first pass will use the same initial
20868 @@ -337,7 +338,7 @@ repeat:
20869 */
20870 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20871
20872 - if (is_kernel_text(addr))
20873 + if (is_kernel_text(address, address + PAGE_SIZE))
20874 prot = PAGE_KERNEL_EXEC;
20875
20876 pages_4k++;
20877 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20878
20879 pud = pud_offset(pgd, va);
20880 pmd = pmd_offset(pud, va);
20881 - if (!pmd_present(*pmd))
20882 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20883 break;
20884
20885 pte = pte_offset_kernel(pmd, va);
20886 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20887
20888 static void __init pagetable_init(void)
20889 {
20890 - pgd_t *pgd_base = swapper_pg_dir;
20891 -
20892 - permanent_kmaps_init(pgd_base);
20893 + permanent_kmaps_init(swapper_pg_dir);
20894 }
20895
20896 #ifdef CONFIG_ACPI_SLEEP
20897 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20898 * ACPI suspend needs this for resume, because things like the intel-agp
20899 * driver might have split up a kernel 4MB mapping.
20900 */
20901 -char swsusp_pg_dir[PAGE_SIZE]
20902 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20903 __attribute__ ((aligned(PAGE_SIZE)));
20904
20905 static inline void save_pg_dir(void)
20906 {
20907 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20908 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20909 }
20910 #else /* !CONFIG_ACPI_SLEEP */
20911 static inline void save_pg_dir(void)
20912 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20913 flush_tlb_all();
20914 }
20915
20916 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20917 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20918 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20919
20920 /* user-defined highmem size */
20921 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
20922 * Initialize the boot-time allocator (with low memory only):
20923 */
20924 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
20925 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20926 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20927 PAGE_SIZE);
20928 if (bootmap == -1L)
20929 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
20930 @@ -864,6 +863,12 @@ void __init mem_init(void)
20931
20932 pci_iommu_alloc();
20933
20934 +#ifdef CONFIG_PAX_PER_CPU_PGD
20935 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20936 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20937 + KERNEL_PGD_PTRS);
20938 +#endif
20939 +
20940 #ifdef CONFIG_FLATMEM
20941 BUG_ON(!mem_map);
20942 #endif
20943 @@ -881,7 +886,7 @@ void __init mem_init(void)
20944 set_highmem_pages_init();
20945
20946 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20947 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20948 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20949 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20950
20951 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20952 @@ -923,10 +928,10 @@ void __init mem_init(void)
20953 ((unsigned long)&__init_end -
20954 (unsigned long)&__init_begin) >> 10,
20955
20956 - (unsigned long)&_etext, (unsigned long)&_edata,
20957 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20958 + (unsigned long)&_sdata, (unsigned long)&_edata,
20959 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20960
20961 - (unsigned long)&_text, (unsigned long)&_etext,
20962 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20963 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20964
20965 /*
20966 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
20967 if (!kernel_set_to_readonly)
20968 return;
20969
20970 + start = ktla_ktva(start);
20971 pr_debug("Set kernel text: %lx - %lx for read write\n",
20972 start, start+size);
20973
20974 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
20975 if (!kernel_set_to_readonly)
20976 return;
20977
20978 + start = ktla_ktva(start);
20979 pr_debug("Set kernel text: %lx - %lx for read only\n",
20980 start, start+size);
20981
20982 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
20983 unsigned long start = PFN_ALIGN(_text);
20984 unsigned long size = PFN_ALIGN(_etext) - start;
20985
20986 + start = ktla_ktva(start);
20987 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20988 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20989 size >> 10);
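
In init_32.c the patch turns is_kernel_text() from a point query on one address into a range query: a candidate mapping counts as text if it overlaps any of several [start, end) windows (_stext.._etext, _sinittext.._einittext, the ACPI wakeup trampoline, low BIOS memory). A tiny standalone model of that overlap test, with invented window addresses standing in for the real symbols:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

/* true iff [start, end) overlaps [r.start, r.end) */
static bool overlaps(unsigned long start, unsigned long end, struct range r)
{
    return start < r.end && r.start < end;
}

static bool is_kernel_text(unsigned long start, unsigned long end)
{
    /* hypothetical text windows standing in for _stext.._etext and friends */
    const struct range windows[] = {
        { 0xc1000000UL, 0xc1400000UL },
        { 0xc1800000UL, 0xc1900000UL },
    };

    for (unsigned int i = 0; i < sizeof(windows) / sizeof(windows[0]); i++)
        if (overlaps(start, end, windows[i]))
            return true;
    return false;
}

int main(void)
{
    printf("%d\n", is_kernel_text(0xc13ff000UL, 0xc1400000UL)); /* 1: touches text */
    printf("%d\n", is_kernel_text(0xc1400000UL, 0xc1600000UL)); /* 0: entirely outside */
    return 0;
}
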
20990 diff -urNp linux-2.6.32.41/arch/x86/mm/init_64.c linux-2.6.32.41/arch/x86/mm/init_64.c
20991 --- linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
20992 +++ linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
20993 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20994 pmd = fill_pmd(pud, vaddr);
20995 pte = fill_pte(pmd, vaddr);
20996
20997 + pax_open_kernel();
20998 set_pte(pte, new_pte);
20999 + pax_close_kernel();
21000
21001 /*
21002 * It's enough to flush this one mapping.
21003 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21004 pgd = pgd_offset_k((unsigned long)__va(phys));
21005 if (pgd_none(*pgd)) {
21006 pud = (pud_t *) spp_getpage();
21007 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21008 - _PAGE_USER));
21009 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21010 }
21011 pud = pud_offset(pgd, (unsigned long)__va(phys));
21012 if (pud_none(*pud)) {
21013 pmd = (pmd_t *) spp_getpage();
21014 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21015 - _PAGE_USER));
21016 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21017 }
21018 pmd = pmd_offset(pud, phys);
21019 BUG_ON(!pmd_none(*pmd));
21020 @@ -675,6 +675,12 @@ void __init mem_init(void)
21021
21022 pci_iommu_alloc();
21023
21024 +#ifdef CONFIG_PAX_PER_CPU_PGD
21025 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21026 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21027 + KERNEL_PGD_PTRS);
21028 +#endif
21029 +
21030 /* clear_bss() already clear the empty_zero_page */
21031
21032 reservedpages = 0;
21033 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21034 static struct vm_area_struct gate_vma = {
21035 .vm_start = VSYSCALL_START,
21036 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21037 - .vm_page_prot = PAGE_READONLY_EXEC,
21038 - .vm_flags = VM_READ | VM_EXEC
21039 + .vm_page_prot = PAGE_READONLY,
21040 + .vm_flags = VM_READ
21041 };
21042
21043 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21044 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21045
21046 const char *arch_vma_name(struct vm_area_struct *vma)
21047 {
21048 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21049 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21050 return "[vdso]";
21051 if (vma == &gate_vma)
21052 return "[vsyscall]";
21053 diff -urNp linux-2.6.32.41/arch/x86/mm/init.c linux-2.6.32.41/arch/x86/mm/init.c
21054 --- linux-2.6.32.41/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21055 +++ linux-2.6.32.41/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21056 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21057 * cause a hotspot and fill up ZONE_DMA. The page tables
21058 * need roughly 0.5KB per GB.
21059 */
21060 -#ifdef CONFIG_X86_32
21061 - start = 0x7000;
21062 -#else
21063 - start = 0x8000;
21064 -#endif
21065 + start = 0x100000;
21066 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21067 tables, PAGE_SIZE);
21068 if (e820_table_start == -1UL)
21069 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21070 #endif
21071
21072 set_nx();
21073 - if (nx_enabled)
21074 + if (nx_enabled && cpu_has_nx)
21075 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21076
21077 /* Enable PSE if available */
21078 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21079 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21080 * mmio resources as well as potential bios/acpi data regions.
21081 */
21082 +
21083 int devmem_is_allowed(unsigned long pagenr)
21084 {
21085 +#ifdef CONFIG_GRKERNSEC_KMEM
21086 + /* allow BDA */
21087 + if (!pagenr)
21088 + return 1;
21089 + /* allow EBDA */
21090 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21091 + return 1;
21092 + /* allow ISA/video mem */
21093 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21094 + return 1;
21095 + /* throw out everything else below 1MB */
21096 + if (pagenr <= 256)
21097 + return 0;
21098 +#else
21099 if (pagenr <= 256)
21100 return 1;
21101 +#endif
21102 +
21103 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21104 return 0;
21105 if (!page_is_ram(pagenr))
21106 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21107
21108 void free_initmem(void)
21109 {
21110 +
21111 +#ifdef CONFIG_PAX_KERNEXEC
21112 +#ifdef CONFIG_X86_32
21113 + /* PaX: limit KERNEL_CS to actual size */
21114 + unsigned long addr, limit;
21115 + struct desc_struct d;
21116 + int cpu;
21117 +
21118 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21119 + limit = (limit - 1UL) >> PAGE_SHIFT;
21120 +
21121 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21122 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21123 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21124 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21125 + }
21126 +
21127 + /* PaX: make KERNEL_CS read-only */
21128 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21129 + if (!paravirt_enabled())
21130 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21131 +/*
21132 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21133 + pgd = pgd_offset_k(addr);
21134 + pud = pud_offset(pgd, addr);
21135 + pmd = pmd_offset(pud, addr);
21136 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21137 + }
21138 +*/
21139 +#ifdef CONFIG_X86_PAE
21140 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21141 +/*
21142 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21143 + pgd = pgd_offset_k(addr);
21144 + pud = pud_offset(pgd, addr);
21145 + pmd = pmd_offset(pud, addr);
21146 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21147 + }
21148 +*/
21149 +#endif
21150 +
21151 +#ifdef CONFIG_MODULES
21152 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21153 +#endif
21154 +
21155 +#else
21156 + pgd_t *pgd;
21157 + pud_t *pud;
21158 + pmd_t *pmd;
21159 + unsigned long addr, end;
21160 +
21161 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21162 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21163 + pgd = pgd_offset_k(addr);
21164 + pud = pud_offset(pgd, addr);
21165 + pmd = pmd_offset(pud, addr);
21166 + if (!pmd_present(*pmd))
21167 + continue;
21168 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21169 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21170 + else
21171 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21172 + }
21173 +
21174 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21175 + end = addr + KERNEL_IMAGE_SIZE;
21176 + for (; addr < end; addr += PMD_SIZE) {
21177 + pgd = pgd_offset_k(addr);
21178 + pud = pud_offset(pgd, addr);
21179 + pmd = pmd_offset(pud, addr);
21180 + if (!pmd_present(*pmd))
21181 + continue;
21182 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21183 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21184 + }
21185 +#endif
21186 +
21187 + flush_tlb_all();
21188 +#endif
21189 +
21190 free_init_pages("unused kernel memory",
21191 (unsigned long)(&__init_begin),
21192 (unsigned long)(&__init_end));
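
The devmem_is_allowed() hunk replaces the stock "anything below 1 MiB is allowed" rule with an allow-list: the BIOS data area, the EBDA page and the ISA/video hole stay reachable through /dev/mem, everything else under 1 MiB is refused. A standalone sketch of just that low-memory policy (the 0x9f000 EBDA address comes from the hunk; the kernel's remaining checks are omitted):

#include <stdio.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xa0000UL
#define ISA_END_ADDRESS   0x100000UL

/* 1 = /dev/mem access allowed for this page, 0 = refused;
 * models only the below-1MiB part of the hardened policy */
static int devmem_low_is_allowed(unsigned long pagenr)
{
    if (!pagenr)
        return 1;                                    /* BIOS data area */
    if (pagenr == (0x9f000UL >> PAGE_SHIFT))
        return 1;                                    /* typical EBDA page */
    if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
        pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))
        return 1;                                    /* ISA/video memory */
    return 0;                                        /* everything else < 1 MiB */
}

int main(void)
{
    printf("page 0x00: %d\n", devmem_low_is_allowed(0x00)); /* 1 */
    printf("page 0x10: %d\n", devmem_low_is_allowed(0x10)); /* 0: ordinary low RAM */
    printf("page 0xa0: %d\n", devmem_low_is_allowed(0xa0)); /* 1: VGA window */
    return 0;
}
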
21193 diff -urNp linux-2.6.32.41/arch/x86/mm/iomap_32.c linux-2.6.32.41/arch/x86/mm/iomap_32.c
21194 --- linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21195 +++ linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21196 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21197 debug_kmap_atomic(type);
21198 idx = type + KM_TYPE_NR * smp_processor_id();
21199 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21200 +
21201 + pax_open_kernel();
21202 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21203 + pax_close_kernel();
21204 +
21205 arch_flush_lazy_mmu_mode();
21206
21207 return (void *)vaddr;
21208 diff -urNp linux-2.6.32.41/arch/x86/mm/ioremap.c linux-2.6.32.41/arch/x86/mm/ioremap.c
21209 --- linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21210 +++ linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21211 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21212 * Second special case: Some BIOSen report the PC BIOS
21213 * area (640->1Mb) as ram even though it is not.
21214 */
21215 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21216 - pagenr < (BIOS_END >> PAGE_SHIFT))
21217 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21218 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21219 return 0;
21220
21221 for (i = 0; i < e820.nr_map; i++) {
21222 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21223 /*
21224 * Don't allow anybody to remap normal RAM that we're using..
21225 */
21226 - for (pfn = phys_addr >> PAGE_SHIFT;
21227 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21228 - pfn++) {
21229 -
21230 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21231 int is_ram = page_is_ram(pfn);
21232
21233 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21234 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21235 return NULL;
21236 WARN_ON_ONCE(is_ram);
21237 }
21238 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21239 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21240
21241 static __initdata int after_paging_init;
21242 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21243 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21244
21245 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21246 {
21247 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21248 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21249
21250 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21251 - memset(bm_pte, 0, sizeof(bm_pte));
21252 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21253 + pmd_populate_user(&init_mm, pmd, bm_pte);
21254
21255 /*
21256 * The boot-ioremap range spans multiple pmds, for which
21257 diff -urNp linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c
21258 --- linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21259 +++ linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21260 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21261 * memory (e.g. tracked pages)? For now, we need this to avoid
21262 * invoking kmemcheck for PnP BIOS calls.
21263 */
21264 - if (regs->flags & X86_VM_MASK)
21265 + if (v8086_mode(regs))
21266 return false;
21267 - if (regs->cs != __KERNEL_CS)
21268 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21269 return false;
21270
21271 pte = kmemcheck_pte_lookup(address);
21272 diff -urNp linux-2.6.32.41/arch/x86/mm/mmap.c linux-2.6.32.41/arch/x86/mm/mmap.c
21273 --- linux-2.6.32.41/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21274 +++ linux-2.6.32.41/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21275 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21276 * Leave an at least ~128 MB hole with possible stack randomization.
21277 */
21278 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21279 -#define MAX_GAP (TASK_SIZE/6*5)
21280 +#define MAX_GAP (pax_task_size/6*5)
21281
21282 /*
21283 * True on X86_32 or when emulating IA32 on X86_64
21284 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21285 return rnd << PAGE_SHIFT;
21286 }
21287
21288 -static unsigned long mmap_base(void)
21289 +static unsigned long mmap_base(struct mm_struct *mm)
21290 {
21291 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21292 + unsigned long pax_task_size = TASK_SIZE;
21293 +
21294 +#ifdef CONFIG_PAX_SEGMEXEC
21295 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21296 + pax_task_size = SEGMEXEC_TASK_SIZE;
21297 +#endif
21298
21299 if (gap < MIN_GAP)
21300 gap = MIN_GAP;
21301 else if (gap > MAX_GAP)
21302 gap = MAX_GAP;
21303
21304 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21305 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21306 }
21307
21308 /*
21309 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21310 * does, but not when emulating X86_32
21311 */
21312 -static unsigned long mmap_legacy_base(void)
21313 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21314 {
21315 - if (mmap_is_ia32())
21316 + if (mmap_is_ia32()) {
21317 +
21318 +#ifdef CONFIG_PAX_SEGMEXEC
21319 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21320 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21321 + else
21322 +#endif
21323 +
21324 return TASK_UNMAPPED_BASE;
21325 - else
21326 + } else
21327 return TASK_UNMAPPED_BASE + mmap_rnd();
21328 }
21329
21330 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21331 void arch_pick_mmap_layout(struct mm_struct *mm)
21332 {
21333 if (mmap_is_legacy()) {
21334 - mm->mmap_base = mmap_legacy_base();
21335 + mm->mmap_base = mmap_legacy_base(mm);
21336 +
21337 +#ifdef CONFIG_PAX_RANDMMAP
21338 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21339 + mm->mmap_base += mm->delta_mmap;
21340 +#endif
21341 +
21342 mm->get_unmapped_area = arch_get_unmapped_area;
21343 mm->unmap_area = arch_unmap_area;
21344 } else {
21345 - mm->mmap_base = mmap_base();
21346 + mm->mmap_base = mmap_base(mm);
21347 +
21348 +#ifdef CONFIG_PAX_RANDMMAP
21349 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21350 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21351 +#endif
21352 +
21353 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21354 mm->unmap_area = arch_unmap_area_topdown;
21355 }
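
mmap_base() keeps the usual "task size minus clamped stack gap minus random offset" layout but now takes the task size as a parameter, since SEGMEXEC halves the usable address space. A small model of the computation with stand-in numbers (3 GiB user space, an 8 MiB stack rlimit, 1 MiB of randomisation; the real values come from the architecture, the rlimit and mmap_rnd()):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define TASK_SIZE          0xc0000000UL      /* illustrative 3 GiB user space */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
#define MIN_GAP            (128UL * 1024 * 1024)
#define MAX_GAP(task_size) ((task_size) / 6 * 5)

static unsigned long mmap_base(unsigned long task_size,
                               unsigned long stack_rlimit,
                               unsigned long rnd)
{
    unsigned long gap = stack_rlimit;

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP(task_size))
        gap = MAX_GAP(task_size);

    return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
    printf("normal:   %#lx\n", mmap_base(TASK_SIZE,          8UL << 20, 1UL << 20));
    printf("segmexec: %#lx\n", mmap_base(SEGMEXEC_TASK_SIZE, 8UL << 20, 1UL << 20));
    return 0;
}
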
21356 diff -urNp linux-2.6.32.41/arch/x86/mm/mmio-mod.c linux-2.6.32.41/arch/x86/mm/mmio-mod.c
21357 --- linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21358 +++ linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21359 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21360 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21361 void __iomem *addr)
21362 {
21363 - static atomic_t next_id;
21364 + static atomic_unchecked_t next_id;
21365 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21366 /* These are page-unaligned. */
21367 struct mmiotrace_map map = {
21368 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21369 .private = trace
21370 },
21371 .phys = offset,
21372 - .id = atomic_inc_return(&next_id)
21373 + .id = atomic_inc_return_unchecked(&next_id)
21374 };
21375 map.map_id = trace->id;
21376
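
The mmio-mod.c change is the usual PAX_REFCOUNT split: counters whose wrap-around is harmless, such as this trace-id generator, become atomic_unchecked_t so that only genuinely security-relevant reference counts get overflow trapping. A userspace sketch of such an id generator using C11 atomics (an analogy, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* analogue of mmiotrace's next_id: a counter where wrapping is harmless */
static atomic_uint next_id;

/* like atomic_inc_return(): increment, then return the new value */
static unsigned int new_trace_id(void)
{
    return atomic_fetch_add(&next_id, 1) + 1;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("trace id %u\n", new_trace_id());
    return 0;
}
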
21377 diff -urNp linux-2.6.32.41/arch/x86/mm/numa_32.c linux-2.6.32.41/arch/x86/mm/numa_32.c
21378 --- linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21379 +++ linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21380 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21381 }
21382 #endif
21383
21384 -extern unsigned long find_max_low_pfn(void);
21385 extern unsigned long highend_pfn, highstart_pfn;
21386
21387 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21388 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr.c linux-2.6.32.41/arch/x86/mm/pageattr.c
21389 --- linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21390 +++ linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21391 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21392 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21393 */
21394 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21395 - pgprot_val(forbidden) |= _PAGE_NX;
21396 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21397
21398 /*
21399 * The kernel text needs to be executable for obvious reasons
21400 * Does not cover __inittext since that is gone later on. On
21401 * 64bit we do not enforce !NX on the low mapping
21402 */
21403 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21404 - pgprot_val(forbidden) |= _PAGE_NX;
21405 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21406 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21407
21408 +#ifdef CONFIG_DEBUG_RODATA
21409 /*
21410 * The .rodata section needs to be read-only. Using the pfn
21411 * catches all aliases.
21412 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21413 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21414 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21415 pgprot_val(forbidden) |= _PAGE_RW;
21416 +#endif
21417 +
21418 +#ifdef CONFIG_PAX_KERNEXEC
21419 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21420 + pgprot_val(forbidden) |= _PAGE_RW;
21421 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21422 + }
21423 +#endif
21424
21425 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21426
21427 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21428 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21429 {
21430 /* change init_mm */
21431 + pax_open_kernel();
21432 set_pte_atomic(kpte, pte);
21433 +
21434 #ifdef CONFIG_X86_32
21435 if (!SHARED_KERNEL_PMD) {
21436 +
21437 +#ifdef CONFIG_PAX_PER_CPU_PGD
21438 + unsigned long cpu;
21439 +#else
21440 struct page *page;
21441 +#endif
21442
21443 +#ifdef CONFIG_PAX_PER_CPU_PGD
21444 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21445 + pgd_t *pgd = get_cpu_pgd(cpu);
21446 +#else
21447 list_for_each_entry(page, &pgd_list, lru) {
21448 - pgd_t *pgd;
21449 + pgd_t *pgd = (pgd_t *)page_address(page);
21450 +#endif
21451 +
21452 pud_t *pud;
21453 pmd_t *pmd;
21454
21455 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21456 + pgd += pgd_index(address);
21457 pud = pud_offset(pgd, address);
21458 pmd = pmd_offset(pud, address);
21459 set_pte_atomic((pte_t *)pmd, pte);
21460 }
21461 }
21462 #endif
21463 + pax_close_kernel();
21464 }
21465
21466 static int
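
static_protections() works by accumulating bits that must never be set for a given address into a "forbidden" mask and clearing them from the requested protection; the KERNEXEC hunk simply adds RW and NX to that mask across the kernel image. A toy version of the mask-and-clear pattern, with an invented image layout and simplified bit positions:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_RW (1ULL << 1)
#define _PAGE_NX (1ULL << 63)

/* hypothetical kernel image layout, purely for the demo */
#define KTEXT_START 0x1000000ULL
#define KDATA_START 0x1400000ULL

/* collect bits that must not be set for this address, then strip them */
static uint64_t static_protections(uint64_t prot, uint64_t address)
{
    uint64_t forbidden = 0;

    if (address >= KTEXT_START && address < KDATA_START)
        forbidden |= _PAGE_RW | _PAGE_NX;   /* text/rodata: never writable, stays executable */

    return prot & ~forbidden;
}

int main(void)
{
    uint64_t want = _PAGE_RW | _PAGE_NX;

    printf("inside image:  %#llx\n", (unsigned long long)static_protections(want, 0x1200000ULL));
    printf("outside image: %#llx\n", (unsigned long long)static_protections(want, 0x2000000ULL));
    return 0;
}
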
21467 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr-test.c linux-2.6.32.41/arch/x86/mm/pageattr-test.c
21468 --- linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21469 +++ linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21470 @@ -36,7 +36,7 @@ enum {
21471
21472 static int pte_testbit(pte_t pte)
21473 {
21474 - return pte_flags(pte) & _PAGE_UNUSED1;
21475 + return pte_flags(pte) & _PAGE_CPA_TEST;
21476 }
21477
21478 struct split_state {
21479 diff -urNp linux-2.6.32.41/arch/x86/mm/pat.c linux-2.6.32.41/arch/x86/mm/pat.c
21480 --- linux-2.6.32.41/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21481 +++ linux-2.6.32.41/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21482 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21483
21484 conflict:
21485 printk(KERN_INFO "%s:%d conflicting memory types "
21486 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21487 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21488 new->end, cattr_name(new->type), cattr_name(entry->type));
21489 return -EBUSY;
21490 }
21491 @@ -559,7 +559,7 @@ unlock_ret:
21492
21493 if (err) {
21494 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21495 - current->comm, current->pid, start, end);
21496 + current->comm, task_pid_nr(current), start, end);
21497 }
21498
21499 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21500 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21501 while (cursor < to) {
21502 if (!devmem_is_allowed(pfn)) {
21503 printk(KERN_INFO
21504 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21505 - current->comm, from, to);
21506 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21507 + current->comm, from, to, cursor);
21508 return 0;
21509 }
21510 cursor += PAGE_SIZE;
21511 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21512 printk(KERN_INFO
21513 "%s:%d ioremap_change_attr failed %s "
21514 "for %Lx-%Lx\n",
21515 - current->comm, current->pid,
21516 + current->comm, task_pid_nr(current),
21517 cattr_name(flags),
21518 base, (unsigned long long)(base + size));
21519 return -EINVAL;
21520 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21521 free_memtype(paddr, paddr + size);
21522 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21523 " for %Lx-%Lx, got %s\n",
21524 - current->comm, current->pid,
21525 + current->comm, task_pid_nr(current),
21526 cattr_name(want_flags),
21527 (unsigned long long)paddr,
21528 (unsigned long long)(paddr + size),
21529 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable_32.c linux-2.6.32.41/arch/x86/mm/pgtable_32.c
21530 --- linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21531 +++ linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21532 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21533 return;
21534 }
21535 pte = pte_offset_kernel(pmd, vaddr);
21536 +
21537 + pax_open_kernel();
21538 if (pte_val(pteval))
21539 set_pte_at(&init_mm, vaddr, pte, pteval);
21540 else
21541 pte_clear(&init_mm, vaddr, pte);
21542 + pax_close_kernel();
21543
21544 /*
21545 * It's enough to flush this one mapping.
21546 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable.c linux-2.6.32.41/arch/x86/mm/pgtable.c
21547 --- linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21548 +++ linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21549 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21550 list_del(&page->lru);
21551 }
21552
21553 -#define UNSHARED_PTRS_PER_PGD \
21554 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21555 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21556 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21557
21558 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21559 +{
21560 + while (count--)
21561 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21562 +}
21563 +#endif
21564 +
21565 +#ifdef CONFIG_PAX_PER_CPU_PGD
21566 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21567 +{
21568 + while (count--)
21569 +
21570 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21571 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21572 +#else
21573 + *dst++ = *src++;
21574 +#endif
21575 +
21576 +}
21577 +#endif
21578 +
21579 +#ifdef CONFIG_X86_64
21580 +#define pxd_t pud_t
21581 +#define pyd_t pgd_t
21582 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21583 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21584 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21585 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21586 +#define PYD_SIZE PGDIR_SIZE
21587 +#else
21588 +#define pxd_t pmd_t
21589 +#define pyd_t pud_t
21590 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21591 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21592 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21593 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21594 +#define PYD_SIZE PUD_SIZE
21595 +#endif
21596 +
21597 +#ifdef CONFIG_PAX_PER_CPU_PGD
21598 +static inline void pgd_ctor(pgd_t *pgd) {}
21599 +static inline void pgd_dtor(pgd_t *pgd) {}
21600 +#else
21601 static void pgd_ctor(pgd_t *pgd)
21602 {
21603 /* If the pgd points to a shared pagetable level (either the
21604 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21605 pgd_list_del(pgd);
21606 spin_unlock_irqrestore(&pgd_lock, flags);
21607 }
21608 +#endif
21609
21610 /*
21611 * List of all pgd's needed for non-PAE so it can invalidate entries
21612 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21613 * -- wli
21614 */
21615
21616 -#ifdef CONFIG_X86_PAE
21617 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21618 /*
21619 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21620 * updating the top-level pagetable entries to guarantee the
21621 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21622 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21623 * and initialize the kernel pmds here.
21624 */
21625 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21626 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21627
21628 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21629 {
21630 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21631 */
21632 flush_tlb_mm(mm);
21633 }
21634 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21635 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21636 #else /* !CONFIG_X86_PAE */
21637
21638 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21639 -#define PREALLOCATED_PMDS 0
21640 +#define PREALLOCATED_PXDS 0
21641
21642 #endif /* CONFIG_X86_PAE */
21643
21644 -static void free_pmds(pmd_t *pmds[])
21645 +static void free_pxds(pxd_t *pxds[])
21646 {
21647 int i;
21648
21649 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21650 - if (pmds[i])
21651 - free_page((unsigned long)pmds[i]);
21652 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21653 + if (pxds[i])
21654 + free_page((unsigned long)pxds[i]);
21655 }
21656
21657 -static int preallocate_pmds(pmd_t *pmds[])
21658 +static int preallocate_pxds(pxd_t *pxds[])
21659 {
21660 int i;
21661 bool failed = false;
21662
21663 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21664 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21665 - if (pmd == NULL)
21666 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21667 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21668 + if (pxd == NULL)
21669 failed = true;
21670 - pmds[i] = pmd;
21671 + pxds[i] = pxd;
21672 }
21673
21674 if (failed) {
21675 - free_pmds(pmds);
21676 + free_pxds(pxds);
21677 return -ENOMEM;
21678 }
21679
21680 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21681 * preallocate which never got a corresponding vma will need to be
21682 * freed manually.
21683 */
21684 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21685 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21686 {
21687 int i;
21688
21689 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21690 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21691 pgd_t pgd = pgdp[i];
21692
21693 if (pgd_val(pgd) != 0) {
21694 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21695 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21696
21697 - pgdp[i] = native_make_pgd(0);
21698 + set_pgd(pgdp + i, native_make_pgd(0));
21699
21700 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21701 - pmd_free(mm, pmd);
21702 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21703 + pxd_free(mm, pxd);
21704 }
21705 }
21706 }
21707
21708 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21709 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21710 {
21711 - pud_t *pud;
21712 + pyd_t *pyd;
21713 unsigned long addr;
21714 int i;
21715
21716 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21717 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21718 return;
21719
21720 - pud = pud_offset(pgd, 0);
21721 +#ifdef CONFIG_X86_64
21722 + pyd = pyd_offset(mm, 0L);
21723 +#else
21724 + pyd = pyd_offset(pgd, 0L);
21725 +#endif
21726
21727 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21728 - i++, pud++, addr += PUD_SIZE) {
21729 - pmd_t *pmd = pmds[i];
21730 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21731 + i++, pyd++, addr += PYD_SIZE) {
21732 + pxd_t *pxd = pxds[i];
21733
21734 if (i >= KERNEL_PGD_BOUNDARY)
21735 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21736 - sizeof(pmd_t) * PTRS_PER_PMD);
21737 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21738 + sizeof(pxd_t) * PTRS_PER_PMD);
21739
21740 - pud_populate(mm, pud, pmd);
21741 + pyd_populate(mm, pyd, pxd);
21742 }
21743 }
21744
21745 pgd_t *pgd_alloc(struct mm_struct *mm)
21746 {
21747 pgd_t *pgd;
21748 - pmd_t *pmds[PREALLOCATED_PMDS];
21749 + pxd_t *pxds[PREALLOCATED_PXDS];
21750 +
21751 unsigned long flags;
21752
21753 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21754 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21755
21756 mm->pgd = pgd;
21757
21758 - if (preallocate_pmds(pmds) != 0)
21759 + if (preallocate_pxds(pxds) != 0)
21760 goto out_free_pgd;
21761
21762 if (paravirt_pgd_alloc(mm) != 0)
21763 - goto out_free_pmds;
21764 + goto out_free_pxds;
21765
21766 /*
21767 * Make sure that pre-populating the pmds is atomic with
21768 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21769 spin_lock_irqsave(&pgd_lock, flags);
21770
21771 pgd_ctor(pgd);
21772 - pgd_prepopulate_pmd(mm, pgd, pmds);
21773 + pgd_prepopulate_pxd(mm, pgd, pxds);
21774
21775 spin_unlock_irqrestore(&pgd_lock, flags);
21776
21777 return pgd;
21778
21779 -out_free_pmds:
21780 - free_pmds(pmds);
21781 +out_free_pxds:
21782 + free_pxds(pxds);
21783 out_free_pgd:
21784 free_page((unsigned long)pgd);
21785 out:
21786 @@ -287,7 +338,7 @@ out:
21787
21788 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21789 {
21790 - pgd_mop_up_pmds(mm, pgd);
21791 + pgd_mop_up_pxds(mm, pgd);
21792 pgd_dtor(pgd);
21793 paravirt_pgd_free(mm, pgd);
21794 free_page((unsigned long)pgd);
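
The pgtable.c rework compiles one preallocation/teardown body against two different paging levels by hiding the level-specific names behind pxd_*/pyd_* aliases (pmd/pud on 32-bit, pud/pgd with per-CPU PGDs on 64-bit). The same trick in miniature, with throwaway types and a DEMO_64BIT switch that is purely illustrative:

#include <stdio.h>

typedef struct { int level; } demo_pmd_t;
typedef struct { int level; } demo_pud_t;

#ifdef DEMO_64BIT
# define pxd_t    demo_pud_t
# define PXD_NAME "pud"
#else
# define pxd_t    demo_pmd_t
# define PXD_NAME "pmd"
#endif

/* one function body, retargeted purely through the pxd_t alias */
static void init_pxd(pxd_t *p, int level)
{
    p->level = level;
    printf("initialised a %s at level %d\n", PXD_NAME, p->level);
}

int main(void)
{
    pxd_t table;

    init_pxd(&table, 2);
    return 0;
}
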
21795 diff -urNp linux-2.6.32.41/arch/x86/mm/setup_nx.c linux-2.6.32.41/arch/x86/mm/setup_nx.c
21796 --- linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21797 +++ linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21798 @@ -4,11 +4,10 @@
21799
21800 #include <asm/pgtable.h>
21801
21802 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21803 int nx_enabled;
21804
21805 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21806 -static int disable_nx __cpuinitdata;
21807 -
21808 +#ifndef CONFIG_PAX_PAGEEXEC
21809 /*
21810 * noexec = on|off
21811 *
21812 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21813 if (!str)
21814 return -EINVAL;
21815 if (!strncmp(str, "on", 2)) {
21816 - __supported_pte_mask |= _PAGE_NX;
21817 - disable_nx = 0;
21818 + nx_enabled = 1;
21819 } else if (!strncmp(str, "off", 3)) {
21820 - disable_nx = 1;
21821 - __supported_pte_mask &= ~_PAGE_NX;
21822 + nx_enabled = 0;
21823 }
21824 return 0;
21825 }
21826 early_param("noexec", noexec_setup);
21827 #endif
21828 +#endif
21829
21830 #ifdef CONFIG_X86_PAE
21831 void __init set_nx(void)
21832 {
21833 - unsigned int v[4], l, h;
21834 + if (!nx_enabled && cpu_has_nx) {
21835 + unsigned l, h;
21836
21837 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21838 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21839 -
21840 - if ((v[3] & (1 << 20)) && !disable_nx) {
21841 - rdmsr(MSR_EFER, l, h);
21842 - l |= EFER_NX;
21843 - wrmsr(MSR_EFER, l, h);
21844 - nx_enabled = 1;
21845 - __supported_pte_mask |= _PAGE_NX;
21846 - }
21847 + __supported_pte_mask &= ~_PAGE_NX;
21848 + rdmsr(MSR_EFER, l, h);
21849 + l &= ~EFER_NX;
21850 + wrmsr(MSR_EFER, l, h);
21851 }
21852 }
21853 #else
21854 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21855 unsigned long efer;
21856
21857 rdmsrl(MSR_EFER, efer);
21858 - if (!(efer & EFER_NX) || disable_nx)
21859 + if (!(efer & EFER_NX) || !nx_enabled)
21860 __supported_pte_mask &= ~_PAGE_NX;
21861 }
21862 #endif
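
Several earlier hunks guard _PAGE_NX with "& __supported_pte_mask"; setup_nx.c is where that mask gets trimmed when NX is unavailable or disabled. A userspace sketch of the masking convention (the bit positions are illustrative, not the real PTE layout):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_RW      (1ULL << 1)
#define _PAGE_NX      (1ULL << 63)

/* trimmed at boot: NX stays in the mask only if the CPU supports it
 * and "noexec=off" was not given */
static uint64_t supported_pte_mask = ~0ULL;

/* every PTE construction filters requested bits through the mask, so an
 * unsupported NX bit silently drops out instead of faulting */
static uint64_t mk_pte_flags(uint64_t want)
{
    return want & supported_pte_mask;
}

int main(void)
{
    uint64_t want = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;

    printf("with NX:    %#llx\n", (unsigned long long)mk_pte_flags(want));

    supported_pte_mask &= ~_PAGE_NX;        /* what set_nx()/check_efer() do on no-NX CPUs */
    printf("without NX: %#llx\n", (unsigned long long)mk_pte_flags(want));
    return 0;
}
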
21863 diff -urNp linux-2.6.32.41/arch/x86/mm/tlb.c linux-2.6.32.41/arch/x86/mm/tlb.c
21864 --- linux-2.6.32.41/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21865 +++ linux-2.6.32.41/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21866 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
21867 BUG();
21868 cpumask_clear_cpu(cpu,
21869 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21870 +
21871 +#ifndef CONFIG_PAX_PER_CPU_PGD
21872 load_cr3(swapper_pg_dir);
21873 +#endif
21874 +
21875 }
21876 EXPORT_SYMBOL_GPL(leave_mm);
21877
21878 diff -urNp linux-2.6.32.41/arch/x86/oprofile/backtrace.c linux-2.6.32.41/arch/x86/oprofile/backtrace.c
21879 --- linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21880 +++ linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21881 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21882 struct frame_head bufhead[2];
21883
21884 /* Also check accessibility of one struct frame_head beyond */
21885 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21886 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21887 return NULL;
21888 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21889 return NULL;
21890 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21891 {
21892 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21893
21894 - if (!user_mode_vm(regs)) {
21895 + if (!user_mode(regs)) {
21896 unsigned long stack = kernel_stack_pointer(regs);
21897 if (depth)
21898 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21899 diff -urNp linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c
21900 --- linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21901 +++ linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21902 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21903 #endif
21904 }
21905
21906 -static int inline addr_increment(void)
21907 +static inline int addr_increment(void)
21908 {
21909 #ifdef CONFIG_SMP
21910 return smp_num_siblings == 2 ? 2 : 1;
21911 diff -urNp linux-2.6.32.41/arch/x86/pci/common.c linux-2.6.32.41/arch/x86/pci/common.c
21912 --- linux-2.6.32.41/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21913 +++ linux-2.6.32.41/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21914 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
21915 int pcibios_last_bus = -1;
21916 unsigned long pirq_table_addr;
21917 struct pci_bus *pci_root_bus;
21918 -struct pci_raw_ops *raw_pci_ops;
21919 -struct pci_raw_ops *raw_pci_ext_ops;
21920 +const struct pci_raw_ops *raw_pci_ops;
21921 +const struct pci_raw_ops *raw_pci_ext_ops;
21922
21923 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
21924 int reg, int len, u32 *val)
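
This file and the PCI files that follow all make the same change: pci_raw_ops tables become const so their function pointers are emitted into read-only memory instead of writable data. The pattern in plain C, with dummy read/write callbacks:

#include <stdio.h>

struct pci_raw_ops {
    int (*read)(unsigned int bus, unsigned int devfn, int reg, int len, unsigned int *val);
    int (*write)(unsigned int bus, unsigned int devfn, int reg, int len, unsigned int val);
};

static int demo_read(unsigned int bus, unsigned int devfn, int reg, int len, unsigned int *val)
{
    *val = 0xffffffffu;                      /* pretend nothing answered */
    return 0;
}

static int demo_write(unsigned int bus, unsigned int devfn, int reg, int len, unsigned int val)
{
    return 0;                                /* discard the write */
}

/* const: the ops table lands in .rodata, so a stray kernel write primitive
 * cannot silently retarget these function pointers */
static const struct pci_raw_ops demo_ops = {
    .read  = demo_read,
    .write = demo_write,
};

int main(void)
{
    unsigned int val;

    demo_ops.read(0, 0, 0, 4, &val);
    printf("read %#x\n", val);
    return 0;
}
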
21925 diff -urNp linux-2.6.32.41/arch/x86/pci/direct.c linux-2.6.32.41/arch/x86/pci/direct.c
21926 --- linux-2.6.32.41/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
21927 +++ linux-2.6.32.41/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
21928 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
21929
21930 #undef PCI_CONF1_ADDRESS
21931
21932 -struct pci_raw_ops pci_direct_conf1 = {
21933 +const struct pci_raw_ops pci_direct_conf1 = {
21934 .read = pci_conf1_read,
21935 .write = pci_conf1_write,
21936 };
21937 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
21938
21939 #undef PCI_CONF2_ADDRESS
21940
21941 -struct pci_raw_ops pci_direct_conf2 = {
21942 +const struct pci_raw_ops pci_direct_conf2 = {
21943 .read = pci_conf2_read,
21944 .write = pci_conf2_write,
21945 };
21946 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
21947 * This should be close to trivial, but it isn't, because there are buggy
21948 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
21949 */
21950 -static int __init pci_sanity_check(struct pci_raw_ops *o)
21951 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
21952 {
21953 u32 x = 0;
21954 int year, devfn;
21955 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_32.c linux-2.6.32.41/arch/x86/pci/mmconfig_32.c
21956 --- linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
21957 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
21958 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
21959 return 0;
21960 }
21961
21962 -static struct pci_raw_ops pci_mmcfg = {
21963 +static const struct pci_raw_ops pci_mmcfg = {
21964 .read = pci_mmcfg_read,
21965 .write = pci_mmcfg_write,
21966 };
21967 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_64.c linux-2.6.32.41/arch/x86/pci/mmconfig_64.c
21968 --- linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
21969 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
21970 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
21971 return 0;
21972 }
21973
21974 -static struct pci_raw_ops pci_mmcfg = {
21975 +static const struct pci_raw_ops pci_mmcfg = {
21976 .read = pci_mmcfg_read,
21977 .write = pci_mmcfg_write,
21978 };
21979 diff -urNp linux-2.6.32.41/arch/x86/pci/numaq_32.c linux-2.6.32.41/arch/x86/pci/numaq_32.c
21980 --- linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
21981 +++ linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
21982 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
21983
21984 #undef PCI_CONF1_MQ_ADDRESS
21985
21986 -static struct pci_raw_ops pci_direct_conf1_mq = {
21987 +static const struct pci_raw_ops pci_direct_conf1_mq = {
21988 .read = pci_conf1_mq_read,
21989 .write = pci_conf1_mq_write
21990 };
21991 diff -urNp linux-2.6.32.41/arch/x86/pci/olpc.c linux-2.6.32.41/arch/x86/pci/olpc.c
21992 --- linux-2.6.32.41/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
21993 +++ linux-2.6.32.41/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
21994 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
21995 return 0;
21996 }
21997
21998 -static struct pci_raw_ops pci_olpc_conf = {
21999 +static const struct pci_raw_ops pci_olpc_conf = {
22000 .read = pci_olpc_read,
22001 .write = pci_olpc_write,
22002 };
22003 diff -urNp linux-2.6.32.41/arch/x86/pci/pcbios.c linux-2.6.32.41/arch/x86/pci/pcbios.c
22004 --- linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22005 +++ linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22006 @@ -56,50 +56,93 @@ union bios32 {
22007 static struct {
22008 unsigned long address;
22009 unsigned short segment;
22010 -} bios32_indirect = { 0, __KERNEL_CS };
22011 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22012
22013 /*
22014 * Returns the entry point for the given service, NULL on error
22015 */
22016
22017 -static unsigned long bios32_service(unsigned long service)
22018 +static unsigned long __devinit bios32_service(unsigned long service)
22019 {
22020 unsigned char return_code; /* %al */
22021 unsigned long address; /* %ebx */
22022 unsigned long length; /* %ecx */
22023 unsigned long entry; /* %edx */
22024 unsigned long flags;
22025 + struct desc_struct d, *gdt;
22026
22027 local_irq_save(flags);
22028 - __asm__("lcall *(%%edi); cld"
22029 +
22030 + gdt = get_cpu_gdt_table(smp_processor_id());
22031 +
22032 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22033 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22034 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22035 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22036 +
22037 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22038 : "=a" (return_code),
22039 "=b" (address),
22040 "=c" (length),
22041 "=d" (entry)
22042 : "0" (service),
22043 "1" (0),
22044 - "D" (&bios32_indirect));
22045 + "D" (&bios32_indirect),
22046 + "r"(__PCIBIOS_DS)
22047 + : "memory");
22048 +
22049 + pax_open_kernel();
22050 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22051 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22052 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22053 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22054 + pax_close_kernel();
22055 +
22056 local_irq_restore(flags);
22057
22058 switch (return_code) {
22059 - case 0:
22060 - return address + entry;
22061 - case 0x80: /* Not present */
22062 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22063 - return 0;
22064 - default: /* Shouldn't happen */
22065 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22066 - service, return_code);
22067 + case 0: {
22068 + int cpu;
22069 + unsigned char flags;
22070 +
22071 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22072 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22073 + printk(KERN_WARNING "bios32_service: not valid\n");
22074 return 0;
22075 + }
22076 + address = address + PAGE_OFFSET;
22077 + length += 16UL; /* some BIOSs underreport this... */
22078 + flags = 4;
22079 + if (length >= 64*1024*1024) {
22080 + length >>= PAGE_SHIFT;
22081 + flags |= 8;
22082 + }
22083 +
22084 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22085 + gdt = get_cpu_gdt_table(cpu);
22086 + pack_descriptor(&d, address, length, 0x9b, flags);
22087 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22088 + pack_descriptor(&d, address, length, 0x93, flags);
22089 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22090 + }
22091 + return entry;
22092 + }
22093 + case 0x80: /* Not present */
22094 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22095 + return 0;
22096 + default: /* Shouldn't happen */
22097 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22098 + service, return_code);
22099 + return 0;
22100 }
22101 }
22102
22103 static struct {
22104 unsigned long address;
22105 unsigned short segment;
22106 -} pci_indirect = { 0, __KERNEL_CS };
22107 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22108
22109 -static int pci_bios_present;
22110 +static int pci_bios_present __read_only;
22111
22112 static int __devinit check_pcibios(void)
22113 {
22114 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22115 unsigned long flags, pcibios_entry;
22116
22117 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22118 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22119 + pci_indirect.address = pcibios_entry;
22120
22121 local_irq_save(flags);
22122 - __asm__(
22123 - "lcall *(%%edi); cld\n\t"
22124 + __asm__("movw %w6, %%ds\n\t"
22125 + "lcall *%%ss:(%%edi); cld\n\t"
22126 + "push %%ss\n\t"
22127 + "pop %%ds\n\t"
22128 "jc 1f\n\t"
22129 "xor %%ah, %%ah\n"
22130 "1:"
22131 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22132 "=b" (ebx),
22133 "=c" (ecx)
22134 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22135 - "D" (&pci_indirect)
22136 + "D" (&pci_indirect),
22137 + "r" (__PCIBIOS_DS)
22138 : "memory");
22139 local_irq_restore(flags);
22140
22141 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22142
22143 switch (len) {
22144 case 1:
22145 - __asm__("lcall *(%%esi); cld\n\t"
22146 + __asm__("movw %w6, %%ds\n\t"
22147 + "lcall *%%ss:(%%esi); cld\n\t"
22148 + "push %%ss\n\t"
22149 + "pop %%ds\n\t"
22150 "jc 1f\n\t"
22151 "xor %%ah, %%ah\n"
22152 "1:"
22153 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22154 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22155 "b" (bx),
22156 "D" ((long)reg),
22157 - "S" (&pci_indirect));
22158 + "S" (&pci_indirect),
22159 + "r" (__PCIBIOS_DS));
22160 /*
22161 * Zero-extend the result beyond 8 bits, do not trust the
22162 * BIOS having done it:
22163 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22164 *value &= 0xff;
22165 break;
22166 case 2:
22167 - __asm__("lcall *(%%esi); cld\n\t"
22168 + __asm__("movw %w6, %%ds\n\t"
22169 + "lcall *%%ss:(%%esi); cld\n\t"
22170 + "push %%ss\n\t"
22171 + "pop %%ds\n\t"
22172 "jc 1f\n\t"
22173 "xor %%ah, %%ah\n"
22174 "1:"
22175 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22176 : "1" (PCIBIOS_READ_CONFIG_WORD),
22177 "b" (bx),
22178 "D" ((long)reg),
22179 - "S" (&pci_indirect));
22180 + "S" (&pci_indirect),
22181 + "r" (__PCIBIOS_DS));
22182 /*
22183 * Zero-extend the result beyond 16 bits, do not trust the
22184 * BIOS having done it:
22185 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22186 *value &= 0xffff;
22187 break;
22188 case 4:
22189 - __asm__("lcall *(%%esi); cld\n\t"
22190 + __asm__("movw %w6, %%ds\n\t"
22191 + "lcall *%%ss:(%%esi); cld\n\t"
22192 + "push %%ss\n\t"
22193 + "pop %%ds\n\t"
22194 "jc 1f\n\t"
22195 "xor %%ah, %%ah\n"
22196 "1:"
22197 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22198 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22199 "b" (bx),
22200 "D" ((long)reg),
22201 - "S" (&pci_indirect));
22202 + "S" (&pci_indirect),
22203 + "r" (__PCIBIOS_DS));
22204 break;
22205 }
22206
22207 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22208
22209 switch (len) {
22210 case 1:
22211 - __asm__("lcall *(%%esi); cld\n\t"
22212 + __asm__("movw %w6, %%ds\n\t"
22213 + "lcall *%%ss:(%%esi); cld\n\t"
22214 + "push %%ss\n\t"
22215 + "pop %%ds\n\t"
22216 "jc 1f\n\t"
22217 "xor %%ah, %%ah\n"
22218 "1:"
22219 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22220 "c" (value),
22221 "b" (bx),
22222 "D" ((long)reg),
22223 - "S" (&pci_indirect));
22224 + "S" (&pci_indirect),
22225 + "r" (__PCIBIOS_DS));
22226 break;
22227 case 2:
22228 - __asm__("lcall *(%%esi); cld\n\t"
22229 + __asm__("movw %w6, %%ds\n\t"
22230 + "lcall *%%ss:(%%esi); cld\n\t"
22231 + "push %%ss\n\t"
22232 + "pop %%ds\n\t"
22233 "jc 1f\n\t"
22234 "xor %%ah, %%ah\n"
22235 "1:"
22236 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22237 "c" (value),
22238 "b" (bx),
22239 "D" ((long)reg),
22240 - "S" (&pci_indirect));
22241 + "S" (&pci_indirect),
22242 + "r" (__PCIBIOS_DS));
22243 break;
22244 case 4:
22245 - __asm__("lcall *(%%esi); cld\n\t"
22246 + __asm__("movw %w6, %%ds\n\t"
22247 + "lcall *%%ss:(%%esi); cld\n\t"
22248 + "push %%ss\n\t"
22249 + "pop %%ds\n\t"
22250 "jc 1f\n\t"
22251 "xor %%ah, %%ah\n"
22252 "1:"
22253 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22254 "c" (value),
22255 "b" (bx),
22256 "D" ((long)reg),
22257 - "S" (&pci_indirect));
22258 + "S" (&pci_indirect),
22259 + "r" (__PCIBIOS_DS));
22260 break;
22261 }
22262
22263 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22264 * Function table for BIOS32 access
22265 */
22266
22267 -static struct pci_raw_ops pci_bios_access = {
22268 +static const struct pci_raw_ops pci_bios_access = {
22269 .read = pci_bios_read,
22270 .write = pci_bios_write
22271 };
22272 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22273 * Try to find PCI BIOS.
22274 */
22275
22276 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22277 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22278 {
22279 union bios32 *check;
22280 unsigned char sum;
22281 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22282
22283 DBG("PCI: Fetching IRQ routing table... ");
22284 __asm__("push %%es\n\t"
22285 + "movw %w8, %%ds\n\t"
22286 "push %%ds\n\t"
22287 "pop %%es\n\t"
22288 - "lcall *(%%esi); cld\n\t"
22289 + "lcall *%%ss:(%%esi); cld\n\t"
22290 "pop %%es\n\t"
22291 + "push %%ss\n\t"
22292 + "pop %%ds\n"
22293 "jc 1f\n\t"
22294 "xor %%ah, %%ah\n"
22295 "1:"
22296 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22297 "1" (0),
22298 "D" ((long) &opt),
22299 "S" (&pci_indirect),
22300 - "m" (opt)
22301 + "m" (opt),
22302 + "r" (__PCIBIOS_DS)
22303 : "memory");
22304 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22305 if (ret & 0xff00)
22306 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22307 {
22308 int ret;
22309
22310 - __asm__("lcall *(%%esi); cld\n\t"
22311 + __asm__("movw %w5, %%ds\n\t"
22312 + "lcall *%%ss:(%%esi); cld\n\t"
22313 + "push %%ss\n\t"
22314 + "pop %%ds\n"
22315 "jc 1f\n\t"
22316 "xor %%ah, %%ah\n"
22317 "1:"
22318 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22319 : "0" (PCIBIOS_SET_PCI_HW_INT),
22320 "b" ((dev->bus->number << 8) | dev->devfn),
22321 "c" ((irq << 8) | (pin + 10)),
22322 - "S" (&pci_indirect));
22323 + "S" (&pci_indirect),
22324 + "r" (__PCIBIOS_DS));
22325 return !(ret & 0xff00);
22326 }
22327 EXPORT_SYMBOL(pcibios_set_irq_routing);
22328 diff -urNp linux-2.6.32.41/arch/x86/power/cpu.c linux-2.6.32.41/arch/x86/power/cpu.c
22329 --- linux-2.6.32.41/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22330 +++ linux-2.6.32.41/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22331 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22332 static void fix_processor_context(void)
22333 {
22334 int cpu = smp_processor_id();
22335 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22336 + struct tss_struct *t = init_tss + cpu;
22337
22338 set_tss_desc(cpu, t); /*
22339 * This just modifies memory; should not be
22340 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22341 */
22342
22343 #ifdef CONFIG_X86_64
22344 + pax_open_kernel();
22345 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22346 + pax_close_kernel();
22347
22348 syscall_init(); /* This sets MSR_*STAR and related */
22349 #endif
22350 diff -urNp linux-2.6.32.41/arch/x86/vdso/Makefile linux-2.6.32.41/arch/x86/vdso/Makefile
22351 --- linux-2.6.32.41/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22352 +++ linux-2.6.32.41/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22353 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22354 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22355 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22356
22357 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22358 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22359 GCOV_PROFILE := n
22360
22361 #
22362 diff -urNp linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c
22363 --- linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22364 +++ linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22365 @@ -22,24 +22,48 @@
22366 #include <asm/hpet.h>
22367 #include <asm/unistd.h>
22368 #include <asm/io.h>
22369 +#include <asm/fixmap.h>
22370 #include "vextern.h"
22371
22372 #define gtod vdso_vsyscall_gtod_data
22373
22374 +notrace noinline long __vdso_fallback_time(long *t)
22375 +{
22376 + long secs;
22377 + asm volatile("syscall"
22378 + : "=a" (secs)
22379 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22380 + return secs;
22381 +}
22382 +
22383 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22384 {
22385 long ret;
22386 asm("syscall" : "=a" (ret) :
22387 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22388 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22389 return ret;
22390 }
22391
22392 +notrace static inline cycle_t __vdso_vread_hpet(void)
22393 +{
22394 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22395 +}
22396 +
22397 +notrace static inline cycle_t __vdso_vread_tsc(void)
22398 +{
22399 + cycle_t ret = (cycle_t)vget_cycles();
22400 +
22401 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22402 +}
22403 +
22404 notrace static inline long vgetns(void)
22405 {
22406 long v;
22407 - cycles_t (*vread)(void);
22408 - vread = gtod->clock.vread;
22409 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22410 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22411 + v = __vdso_vread_tsc();
22412 + else
22413 + v = __vdso_vread_hpet();
22414 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22415 return (v * gtod->clock.mult) >> gtod->clock.shift;
22416 }
22417
22418 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22419
22420 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22421 {
22422 - if (likely(gtod->sysctl_enabled))
22423 + if (likely(gtod->sysctl_enabled &&
22424 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22425 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22426 switch (clock) {
22427 case CLOCK_REALTIME:
22428 if (likely(gtod->clock.vread))
22429 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22430 int clock_gettime(clockid_t, struct timespec *)
22431 __attribute__((weak, alias("__vdso_clock_gettime")));
22432
22433 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22434 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22435 {
22436 long ret;
22437 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22438 + asm("syscall" : "=a" (ret) :
22439 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22440 + return ret;
22441 +}
22442 +
22443 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22444 +{
22445 + if (likely(gtod->sysctl_enabled &&
22446 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22447 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22448 + {
22449 if (likely(tv != NULL)) {
22450 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22451 offsetof(struct timespec, tv_nsec) ||
22452 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22453 }
22454 return 0;
22455 }
22456 - asm("syscall" : "=a" (ret) :
22457 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22458 - return ret;
22459 + return __vdso_fallback_gettimeofday(tv, tz);
22460 }
22461 int gettimeofday(struct timeval *, struct timezone *)
22462 __attribute__((weak, alias("__vdso_gettimeofday")));
22463 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c
22464 --- linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22465 +++ linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22466 @@ -25,6 +25,7 @@
22467 #include <asm/tlbflush.h>
22468 #include <asm/vdso.h>
22469 #include <asm/proto.h>
22470 +#include <asm/mman.h>
22471
22472 enum {
22473 VDSO_DISABLED = 0,
22474 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22475 void enable_sep_cpu(void)
22476 {
22477 int cpu = get_cpu();
22478 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22479 + struct tss_struct *tss = init_tss + cpu;
22480
22481 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22482 put_cpu();
22483 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22484 gate_vma.vm_start = FIXADDR_USER_START;
22485 gate_vma.vm_end = FIXADDR_USER_END;
22486 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22487 - gate_vma.vm_page_prot = __P101;
22488 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22489 /*
22490 * Make sure the vDSO gets into every core dump.
22491 * Dumping its contents makes post-mortem fully interpretable later
22492 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22493 if (compat)
22494 addr = VDSO_HIGH_BASE;
22495 else {
22496 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22497 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22498 if (IS_ERR_VALUE(addr)) {
22499 ret = addr;
22500 goto up_fail;
22501 }
22502 }
22503
22504 - current->mm->context.vdso = (void *)addr;
22505 + current->mm->context.vdso = addr;
22506
22507 if (compat_uses_vma || !compat) {
22508 /*
22509 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22510 }
22511
22512 current_thread_info()->sysenter_return =
22513 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22514 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22515
22516 up_fail:
22517 if (ret)
22518 - current->mm->context.vdso = NULL;
22519 + current->mm->context.vdso = 0;
22520
22521 up_write(&mm->mmap_sem);
22522
22523 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22524
22525 const char *arch_vma_name(struct vm_area_struct *vma)
22526 {
22527 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22528 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22529 return "[vdso]";
22530 +
22531 +#ifdef CONFIG_PAX_SEGMEXEC
22532 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22533 + return "[vdso]";
22534 +#endif
22535 +
22536 return NULL;
22537 }
22538
22539 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22540 struct mm_struct *mm = tsk->mm;
22541
22542 /* Check to see if this task was created in compat vdso mode */
22543 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22544 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22545 return &gate_vma;
22546 return NULL;
22547 }
22548 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso.lds.S linux-2.6.32.41/arch/x86/vdso/vdso.lds.S
22549 --- linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22550 +++ linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
22551 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22552 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22553 #include "vextern.h"
22554 #undef VEXTERN
22555 +
22556 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22557 +VEXTERN(fallback_gettimeofday)
22558 +VEXTERN(fallback_time)
22559 +VEXTERN(getcpu)
22560 +#undef VEXTERN
22561 diff -urNp linux-2.6.32.41/arch/x86/vdso/vextern.h linux-2.6.32.41/arch/x86/vdso/vextern.h
22562 --- linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22563 +++ linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22564 @@ -11,6 +11,5 @@
22565 put into vextern.h and be referenced as a pointer with vdso prefix.
22566 The main kernel later fills in the values. */
22567
22568 -VEXTERN(jiffies)
22569 VEXTERN(vgetcpu_mode)
22570 VEXTERN(vsyscall_gtod_data)
22571 diff -urNp linux-2.6.32.41/arch/x86/vdso/vma.c linux-2.6.32.41/arch/x86/vdso/vma.c
22572 --- linux-2.6.32.41/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22573 +++ linux-2.6.32.41/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22574 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22575 if (!vbase)
22576 goto oom;
22577
22578 - if (memcmp(vbase, "\177ELF", 4)) {
22579 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22580 printk("VDSO: I'm broken; not ELF\n");
22581 vdso_enabled = 0;
22582 }
22583 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22584 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22585 #include "vextern.h"
22586 #undef VEXTERN
22587 + vunmap(vbase);
22588 return 0;
22589
22590 oom:
22591 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22592 goto up_fail;
22593 }
22594
22595 - current->mm->context.vdso = (void *)addr;
22596 + current->mm->context.vdso = addr;
22597
22598 ret = install_special_mapping(mm, addr, vdso_size,
22599 VM_READ|VM_EXEC|
22600 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22601 VM_ALWAYSDUMP,
22602 vdso_pages);
22603 if (ret) {
22604 - current->mm->context.vdso = NULL;
22605 + current->mm->context.vdso = 0;
22606 goto up_fail;
22607 }
22608
22609 @@ -132,10 +133,3 @@ up_fail:
22610 up_write(&mm->mmap_sem);
22611 return ret;
22612 }
22613 -
22614 -static __init int vdso_setup(char *s)
22615 -{
22616 - vdso_enabled = simple_strtoul(s, NULL, 0);
22617 - return 0;
22618 -}
22619 -__setup("vdso=", vdso_setup);
22620 diff -urNp linux-2.6.32.41/arch/x86/xen/enlighten.c linux-2.6.32.41/arch/x86/xen/enlighten.c
22621 --- linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22622 +++ linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22623 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22624
22625 struct shared_info xen_dummy_shared_info;
22626
22627 -void *xen_initial_gdt;
22628 -
22629 /*
22630 * Point at some empty memory to start with. We map the real shared_info
22631 * page as soon as fixmap is up and running.
22632 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22633
22634 preempt_disable();
22635
22636 - start = __get_cpu_var(idt_desc).address;
22637 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22638 end = start + __get_cpu_var(idt_desc).size + 1;
22639
22640 xen_mc_flush();
22641 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22642 #endif
22643 };
22644
22645 -static void xen_reboot(int reason)
22646 +static __noreturn void xen_reboot(int reason)
22647 {
22648 struct sched_shutdown r = { .reason = reason };
22649
22650 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22651 BUG();
22652 }
22653
22654 -static void xen_restart(char *msg)
22655 +static __noreturn void xen_restart(char *msg)
22656 {
22657 xen_reboot(SHUTDOWN_reboot);
22658 }
22659
22660 -static void xen_emergency_restart(void)
22661 +static __noreturn void xen_emergency_restart(void)
22662 {
22663 xen_reboot(SHUTDOWN_reboot);
22664 }
22665
22666 -static void xen_machine_halt(void)
22667 +static __noreturn void xen_machine_halt(void)
22668 {
22669 xen_reboot(SHUTDOWN_poweroff);
22670 }
22671 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22672 */
22673 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22674
22675 -#ifdef CONFIG_X86_64
22676 /* Work out if we support NX */
22677 - check_efer();
22678 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22679 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22680 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22681 + unsigned l, h;
22682 +
22683 +#ifdef CONFIG_X86_PAE
22684 + nx_enabled = 1;
22685 +#endif
22686 + __supported_pte_mask |= _PAGE_NX;
22687 + rdmsr(MSR_EFER, l, h);
22688 + l |= EFER_NX;
22689 + wrmsr(MSR_EFER, l, h);
22690 + }
22691 #endif
22692
22693 xen_setup_features();
22694 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22695
22696 machine_ops = xen_machine_ops;
22697
22698 - /*
22699 - * The only reliable way to retain the initial address of the
22700 - * percpu gdt_page is to remember it here, so we can go and
22701 - * mark it RW later, when the initial percpu area is freed.
22702 - */
22703 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22704 -
22705 xen_smp_init();
22706
22707 pgd = (pgd_t *)xen_start_info->pt_base;
22708 diff -urNp linux-2.6.32.41/arch/x86/xen/mmu.c linux-2.6.32.41/arch/x86/xen/mmu.c
22709 --- linux-2.6.32.41/arch/x86/xen/mmu.c 2011-03-27 14:31:47.000000000 -0400
22710 +++ linux-2.6.32.41/arch/x86/xen/mmu.c 2011-04-17 15:56:46.000000000 -0400
22711 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22712 convert_pfn_mfn(init_level4_pgt);
22713 convert_pfn_mfn(level3_ident_pgt);
22714 convert_pfn_mfn(level3_kernel_pgt);
22715 + convert_pfn_mfn(level3_vmalloc_pgt);
22716 + convert_pfn_mfn(level3_vmemmap_pgt);
22717
22718 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22719 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22720 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22721 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22722 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22723 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22724 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22725 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22726 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22727 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22728 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22729 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22730
22731 diff -urNp linux-2.6.32.41/arch/x86/xen/smp.c linux-2.6.32.41/arch/x86/xen/smp.c
22732 --- linux-2.6.32.41/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22733 +++ linux-2.6.32.41/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22734 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22735 {
22736 BUG_ON(smp_processor_id() != 0);
22737 native_smp_prepare_boot_cpu();
22738 -
22739 - /* We've switched to the "real" per-cpu gdt, so make sure the
22740 - old memory can be recycled */
22741 - make_lowmem_page_readwrite(xen_initial_gdt);
22742 -
22743 xen_setup_vcpu_info_placement();
22744 }
22745
22746 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22747 gdt = get_cpu_gdt_table(cpu);
22748
22749 ctxt->flags = VGCF_IN_KERNEL;
22750 - ctxt->user_regs.ds = __USER_DS;
22751 - ctxt->user_regs.es = __USER_DS;
22752 + ctxt->user_regs.ds = __KERNEL_DS;
22753 + ctxt->user_regs.es = __KERNEL_DS;
22754 ctxt->user_regs.ss = __KERNEL_DS;
22755 #ifdef CONFIG_X86_32
22756 ctxt->user_regs.fs = __KERNEL_PERCPU;
22757 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22758 + savesegment(gs, ctxt->user_regs.gs);
22759 #else
22760 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22761 #endif
22762 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22763 int rc;
22764
22765 per_cpu(current_task, cpu) = idle;
22766 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22767 #ifdef CONFIG_X86_32
22768 irq_ctx_init(cpu);
22769 #else
22770 clear_tsk_thread_flag(idle, TIF_FORK);
22771 - per_cpu(kernel_stack, cpu) =
22772 - (unsigned long)task_stack_page(idle) -
22773 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22774 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22775 #endif
22776 xen_setup_runstate_info(cpu);
22777 xen_setup_timer(cpu);
22778 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-asm_32.S linux-2.6.32.41/arch/x86/xen/xen-asm_32.S
22779 --- linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22780 +++ linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22781 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22782 ESP_OFFSET=4 # bytes pushed onto stack
22783
22784 /*
22785 - * Store vcpu_info pointer for easy access. Do it this way to
22786 - * avoid having to reload %fs
22787 + * Store vcpu_info pointer for easy access.
22788 */
22789 #ifdef CONFIG_SMP
22790 - GET_THREAD_INFO(%eax)
22791 - movl TI_cpu(%eax), %eax
22792 - movl __per_cpu_offset(,%eax,4), %eax
22793 - mov per_cpu__xen_vcpu(%eax), %eax
22794 + push %fs
22795 + mov $(__KERNEL_PERCPU), %eax
22796 + mov %eax, %fs
22797 + mov PER_CPU_VAR(xen_vcpu), %eax
22798 + pop %fs
22799 #else
22800 movl per_cpu__xen_vcpu, %eax
22801 #endif
22802 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-head.S linux-2.6.32.41/arch/x86/xen/xen-head.S
22803 --- linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22804 +++ linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22805 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22806 #ifdef CONFIG_X86_32
22807 mov %esi,xen_start_info
22808 mov $init_thread_union+THREAD_SIZE,%esp
22809 +#ifdef CONFIG_SMP
22810 + movl $cpu_gdt_table,%edi
22811 + movl $__per_cpu_load,%eax
22812 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22813 + rorl $16,%eax
22814 + movb %al,__KERNEL_PERCPU + 4(%edi)
22815 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22816 + movl $__per_cpu_end - 1,%eax
22817 + subl $__per_cpu_start,%eax
22818 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22819 +#endif
22820 #else
22821 mov %rsi,xen_start_info
22822 mov $init_thread_union+THREAD_SIZE,%rsp
22823 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-ops.h linux-2.6.32.41/arch/x86/xen/xen-ops.h
22824 --- linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22825 +++ linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22826 @@ -10,8 +10,6 @@
22827 extern const char xen_hypervisor_callback[];
22828 extern const char xen_failsafe_callback[];
22829
22830 -extern void *xen_initial_gdt;
22831 -
22832 struct trap_info;
22833 void xen_copy_trap_info(struct trap_info *traps);
22834
22835 diff -urNp linux-2.6.32.41/block/blk-integrity.c linux-2.6.32.41/block/blk-integrity.c
22836 --- linux-2.6.32.41/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22837 +++ linux-2.6.32.41/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22838 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22839 NULL,
22840 };
22841
22842 -static struct sysfs_ops integrity_ops = {
22843 +static const struct sysfs_ops integrity_ops = {
22844 .show = &integrity_attr_show,
22845 .store = &integrity_attr_store,
22846 };
22847 diff -urNp linux-2.6.32.41/block/blk-iopoll.c linux-2.6.32.41/block/blk-iopoll.c
22848 --- linux-2.6.32.41/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22849 +++ linux-2.6.32.41/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22850 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22851 }
22852 EXPORT_SYMBOL(blk_iopoll_complete);
22853
22854 -static void blk_iopoll_softirq(struct softirq_action *h)
22855 +static void blk_iopoll_softirq(void)
22856 {
22857 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22858 int rearm = 0, budget = blk_iopoll_budget;
22859 diff -urNp linux-2.6.32.41/block/blk-map.c linux-2.6.32.41/block/blk-map.c
22860 --- linux-2.6.32.41/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22861 +++ linux-2.6.32.41/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22862 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22863 * direct dma. else, set up kernel bounce buffers
22864 */
22865 uaddr = (unsigned long) ubuf;
22866 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
22867 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22868 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22869 else
22870 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22871 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22872 for (i = 0; i < iov_count; i++) {
22873 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22874
22875 + if (!iov[i].iov_len)
22876 + return -EINVAL;
22877 +
22878 if (uaddr & queue_dma_alignment(q)) {
22879 unaligned = 1;
22880 break;
22881 }
22882 - if (!iov[i].iov_len)
22883 - return -EINVAL;
22884 }
22885
22886 if (unaligned || (q->dma_pad_mask & len) || map_data)
22887 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22888 if (!len || !kbuf)
22889 return -EINVAL;
22890
22891 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22892 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22893 if (do_copy)
22894 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22895 else
22896 diff -urNp linux-2.6.32.41/block/blk-softirq.c linux-2.6.32.41/block/blk-softirq.c
22897 --- linux-2.6.32.41/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22898 +++ linux-2.6.32.41/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22899 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22900 * Softirq action handler - move entries to local list and loop over them
22901 * while passing them to the queue registered handler.
22902 */
22903 -static void blk_done_softirq(struct softirq_action *h)
22904 +static void blk_done_softirq(void)
22905 {
22906 struct list_head *cpu_list, local_list;
22907
22908 diff -urNp linux-2.6.32.41/block/blk-sysfs.c linux-2.6.32.41/block/blk-sysfs.c
22909 --- linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22910 +++ linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22911 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22912 kmem_cache_free(blk_requestq_cachep, q);
22913 }
22914
22915 -static struct sysfs_ops queue_sysfs_ops = {
22916 +static const struct sysfs_ops queue_sysfs_ops = {
22917 .show = queue_attr_show,
22918 .store = queue_attr_store,
22919 };
22920 diff -urNp linux-2.6.32.41/block/bsg.c linux-2.6.32.41/block/bsg.c
22921 --- linux-2.6.32.41/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
22922 +++ linux-2.6.32.41/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
22923 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22924 struct sg_io_v4 *hdr, struct bsg_device *bd,
22925 fmode_t has_write_perm)
22926 {
22927 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22928 + unsigned char *cmdptr;
22929 +
22930 if (hdr->request_len > BLK_MAX_CDB) {
22931 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22932 if (!rq->cmd)
22933 return -ENOMEM;
22934 - }
22935 + cmdptr = rq->cmd;
22936 + } else
22937 + cmdptr = tmpcmd;
22938
22939 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22940 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22941 hdr->request_len))
22942 return -EFAULT;
22943
22944 + if (cmdptr != rq->cmd)
22945 + memcpy(rq->cmd, cmdptr, hdr->request_len);
22946 +
22947 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22948 if (blk_verify_command(rq->cmd, has_write_perm))
22949 return -EPERM;
22950 diff -urNp linux-2.6.32.41/block/elevator.c linux-2.6.32.41/block/elevator.c
22951 --- linux-2.6.32.41/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
22952 +++ linux-2.6.32.41/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
22953 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
22954 return error;
22955 }
22956
22957 -static struct sysfs_ops elv_sysfs_ops = {
22958 +static const struct sysfs_ops elv_sysfs_ops = {
22959 .show = elv_attr_show,
22960 .store = elv_attr_store,
22961 };
22962 diff -urNp linux-2.6.32.41/block/scsi_ioctl.c linux-2.6.32.41/block/scsi_ioctl.c
22963 --- linux-2.6.32.41/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
22964 +++ linux-2.6.32.41/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
22965 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
22966 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22967 struct sg_io_hdr *hdr, fmode_t mode)
22968 {
22969 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22970 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22971 + unsigned char *cmdptr;
22972 +
22973 + if (rq->cmd != rq->__cmd)
22974 + cmdptr = rq->cmd;
22975 + else
22976 + cmdptr = tmpcmd;
22977 +
22978 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22979 return -EFAULT;
22980 +
22981 + if (cmdptr != rq->cmd)
22982 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22983 +
22984 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22985 return -EPERM;
22986
22987 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
22988 int err;
22989 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22990 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22991 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22992 + unsigned char *cmdptr;
22993
22994 if (!sic)
22995 return -EINVAL;
22996 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
22997 */
22998 err = -EFAULT;
22999 rq->cmd_len = cmdlen;
23000 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23001 +
23002 + if (rq->cmd != rq->__cmd)
23003 + cmdptr = rq->cmd;
23004 + else
23005 + cmdptr = tmpcmd;
23006 +
23007 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23008 goto error;
23009
23010 + if (rq->cmd != cmdptr)
23011 + memcpy(rq->cmd, cmdptr, cmdlen);
23012 +
23013 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23014 goto error;
23015
23016 diff -urNp linux-2.6.32.41/crypto/serpent.c linux-2.6.32.41/crypto/serpent.c
23017 --- linux-2.6.32.41/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23018 +++ linux-2.6.32.41/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23019 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23020 u32 r0,r1,r2,r3,r4;
23021 int i;
23022
23023 + pax_track_stack();
23024 +
23025 /* Copy key, add padding */
23026
23027 for (i = 0; i < keylen; ++i)
23028 diff -urNp linux-2.6.32.41/Documentation/dontdiff linux-2.6.32.41/Documentation/dontdiff
23029 --- linux-2.6.32.41/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23030 +++ linux-2.6.32.41/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23031 @@ -1,13 +1,16 @@
23032 *.a
23033 *.aux
23034 *.bin
23035 +*.cis
23036 *.cpio
23037 *.csp
23038 +*.dbg
23039 *.dsp
23040 *.dvi
23041 *.elf
23042 *.eps
23043 *.fw
23044 +*.gcno
23045 *.gen.S
23046 *.gif
23047 *.grep
23048 @@ -38,8 +41,10 @@
23049 *.tab.h
23050 *.tex
23051 *.ver
23052 +*.vim
23053 *.xml
23054 *_MODULES
23055 +*_reg_safe.h
23056 *_vga16.c
23057 *~
23058 *.9
23059 @@ -49,11 +54,16 @@
23060 53c700_d.h
23061 CVS
23062 ChangeSet
23063 +GPATH
23064 +GRTAGS
23065 +GSYMS
23066 +GTAGS
23067 Image
23068 Kerntypes
23069 Module.markers
23070 Module.symvers
23071 PENDING
23072 +PERF*
23073 SCCS
23074 System.map*
23075 TAGS
23076 @@ -76,7 +86,11 @@ btfixupprep
23077 build
23078 bvmlinux
23079 bzImage*
23080 +capability_names.h
23081 +capflags.c
23082 classlist.h*
23083 +clut_vga16.c
23084 +common-cmds.h
23085 comp*.log
23086 compile.h*
23087 conf
23088 @@ -103,13 +117,14 @@ gen_crc32table
23089 gen_init_cpio
23090 genksyms
23091 *_gray256.c
23092 +hash
23093 ihex2fw
23094 ikconfig.h*
23095 initramfs_data.cpio
23096 +initramfs_data.cpio.bz2
23097 initramfs_data.cpio.gz
23098 initramfs_list
23099 kallsyms
23100 -kconfig
23101 keywords.c
23102 ksym.c*
23103 ksym.h*
23104 @@ -133,7 +148,9 @@ mkboot
23105 mkbugboot
23106 mkcpustr
23107 mkdep
23108 +mkpiggy
23109 mkprep
23110 +mkregtable
23111 mktables
23112 mktree
23113 modpost
23114 @@ -149,6 +166,7 @@ patches*
23115 pca200e.bin
23116 pca200e_ecd.bin2
23117 piggy.gz
23118 +piggy.S
23119 piggyback
23120 pnmtologo
23121 ppc_defs.h*
23122 @@ -157,12 +175,15 @@ qconf
23123 raid6altivec*.c
23124 raid6int*.c
23125 raid6tables.c
23126 +regdb.c
23127 relocs
23128 +rlim_names.h
23129 series
23130 setup
23131 setup.bin
23132 setup.elf
23133 sImage
23134 +slabinfo
23135 sm_tbl*
23136 split-include
23137 syscalltab.h
23138 @@ -186,14 +207,20 @@ version.h*
23139 vmlinux
23140 vmlinux-*
23141 vmlinux.aout
23142 +vmlinux.bin.all
23143 +vmlinux.bin.bz2
23144 vmlinux.lds
23145 +vmlinux.relocs
23146 +voffset.h
23147 vsyscall.lds
23148 vsyscall_32.lds
23149 wanxlfw.inc
23150 uImage
23151 unifdef
23152 +utsrelease.h
23153 wakeup.bin
23154 wakeup.elf
23155 wakeup.lds
23156 zImage*
23157 zconf.hash.c
23158 +zoffset.h
23159 diff -urNp linux-2.6.32.41/Documentation/kernel-parameters.txt linux-2.6.32.41/Documentation/kernel-parameters.txt
23160 --- linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23161 +++ linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23162 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23163 the specified number of seconds. This is to be used if
23164 your oopses keep scrolling off the screen.
23165
23166 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23167 + virtualization environments that don't cope well with the
23168 + expand down segment used by UDEREF on X86-32 or the frequent
23169 + page table updates on X86-64.
23170 +
23171 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23172 +
23173 pcbit= [HW,ISDN]
23174
23175 pcd. [PARIDE]
23176 diff -urNp linux-2.6.32.41/drivers/acpi/acpi_pad.c linux-2.6.32.41/drivers/acpi/acpi_pad.c
23177 --- linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23178 +++ linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23179 @@ -30,7 +30,7 @@
23180 #include <acpi/acpi_bus.h>
23181 #include <acpi/acpi_drivers.h>
23182
23183 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23184 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23185 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23186 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23187 static DEFINE_MUTEX(isolated_cpus_lock);
23188 diff -urNp linux-2.6.32.41/drivers/acpi/battery.c linux-2.6.32.41/drivers/acpi/battery.c
23189 --- linux-2.6.32.41/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23190 +++ linux-2.6.32.41/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23191 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23192 }
23193
23194 static struct battery_file {
23195 - struct file_operations ops;
23196 + const struct file_operations ops;
23197 mode_t mode;
23198 const char *name;
23199 } acpi_battery_file[] = {
23200 diff -urNp linux-2.6.32.41/drivers/acpi/dock.c linux-2.6.32.41/drivers/acpi/dock.c
23201 --- linux-2.6.32.41/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23202 +++ linux-2.6.32.41/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23203 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23204 struct list_head list;
23205 struct list_head hotplug_list;
23206 acpi_handle handle;
23207 - struct acpi_dock_ops *ops;
23208 + const struct acpi_dock_ops *ops;
23209 void *context;
23210 };
23211
23212 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23213 * the dock driver after _DCK is executed.
23214 */
23215 int
23216 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23217 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23218 void *context)
23219 {
23220 struct dock_dependent_device *dd;
23221 diff -urNp linux-2.6.32.41/drivers/acpi/osl.c linux-2.6.32.41/drivers/acpi/osl.c
23222 --- linux-2.6.32.41/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23223 +++ linux-2.6.32.41/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23224 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23225 void __iomem *virt_addr;
23226
23227 virt_addr = ioremap(phys_addr, width);
23228 + if (!virt_addr)
23229 + return AE_NO_MEMORY;
23230 if (!value)
23231 value = &dummy;
23232
23233 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23234 void __iomem *virt_addr;
23235
23236 virt_addr = ioremap(phys_addr, width);
23237 + if (!virt_addr)
23238 + return AE_NO_MEMORY;
23239
23240 switch (width) {
23241 case 8:
23242 diff -urNp linux-2.6.32.41/drivers/acpi/power_meter.c linux-2.6.32.41/drivers/acpi/power_meter.c
23243 --- linux-2.6.32.41/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23244 +++ linux-2.6.32.41/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23245 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23246 return res;
23247
23248 temp /= 1000;
23249 - if (temp < 0)
23250 - return -EINVAL;
23251
23252 mutex_lock(&resource->lock);
23253 resource->trip[attr->index - 7] = temp;
23254 diff -urNp linux-2.6.32.41/drivers/acpi/proc.c linux-2.6.32.41/drivers/acpi/proc.c
23255 --- linux-2.6.32.41/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23256 +++ linux-2.6.32.41/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23257 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23258 size_t count, loff_t * ppos)
23259 {
23260 struct list_head *node, *next;
23261 - char strbuf[5];
23262 - char str[5] = "";
23263 - unsigned int len = count;
23264 + char strbuf[5] = {0};
23265 struct acpi_device *found_dev = NULL;
23266
23267 - if (len > 4)
23268 - len = 4;
23269 - if (len < 0)
23270 - return -EFAULT;
23271 + if (count > 4)
23272 + count = 4;
23273
23274 - if (copy_from_user(strbuf, buffer, len))
23275 + if (copy_from_user(strbuf, buffer, count))
23276 return -EFAULT;
23277 - strbuf[len] = '\0';
23278 - sscanf(strbuf, "%s", str);
23279 + strbuf[count] = '\0';
23280
23281 mutex_lock(&acpi_device_lock);
23282 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23283 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23284 if (!dev->wakeup.flags.valid)
23285 continue;
23286
23287 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23288 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23289 dev->wakeup.state.enabled =
23290 dev->wakeup.state.enabled ? 0 : 1;
23291 found_dev = dev;
23292 diff -urNp linux-2.6.32.41/drivers/acpi/processor_core.c linux-2.6.32.41/drivers/acpi/processor_core.c
23293 --- linux-2.6.32.41/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23294 +++ linux-2.6.32.41/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23295 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23296 return 0;
23297 }
23298
23299 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23300 + BUG_ON(pr->id >= nr_cpu_ids);
23301
23302 /*
23303 * Buggy BIOS check
23304 diff -urNp linux-2.6.32.41/drivers/acpi/sbshc.c linux-2.6.32.41/drivers/acpi/sbshc.c
23305 --- linux-2.6.32.41/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23306 +++ linux-2.6.32.41/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23307 @@ -17,7 +17,7 @@
23308
23309 #define PREFIX "ACPI: "
23310
23311 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23312 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23313 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23314
23315 struct acpi_smb_hc {
23316 diff -urNp linux-2.6.32.41/drivers/acpi/sleep.c linux-2.6.32.41/drivers/acpi/sleep.c
23317 --- linux-2.6.32.41/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23318 +++ linux-2.6.32.41/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23319 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23320 }
23321 }
23322
23323 -static struct platform_suspend_ops acpi_suspend_ops = {
23324 +static const struct platform_suspend_ops acpi_suspend_ops = {
23325 .valid = acpi_suspend_state_valid,
23326 .begin = acpi_suspend_begin,
23327 .prepare_late = acpi_pm_prepare,
23328 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23329 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23330 * been requested.
23331 */
23332 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23333 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23334 .valid = acpi_suspend_state_valid,
23335 .begin = acpi_suspend_begin_old,
23336 .prepare_late = acpi_pm_disable_gpes,
23337 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23338 acpi_enable_all_runtime_gpes();
23339 }
23340
23341 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23342 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23343 .begin = acpi_hibernation_begin,
23344 .end = acpi_pm_end,
23345 .pre_snapshot = acpi_hibernation_pre_snapshot,
23346 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23347 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23348 * been requested.
23349 */
23350 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23351 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23352 .begin = acpi_hibernation_begin_old,
23353 .end = acpi_pm_end,
23354 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23355 diff -urNp linux-2.6.32.41/drivers/acpi/video.c linux-2.6.32.41/drivers/acpi/video.c
23356 --- linux-2.6.32.41/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23357 +++ linux-2.6.32.41/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23358 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23359 vd->brightness->levels[request_level]);
23360 }
23361
23362 -static struct backlight_ops acpi_backlight_ops = {
23363 +static const struct backlight_ops acpi_backlight_ops = {
23364 .get_brightness = acpi_video_get_brightness,
23365 .update_status = acpi_video_set_brightness,
23366 };
23367 diff -urNp linux-2.6.32.41/drivers/ata/ahci.c linux-2.6.32.41/drivers/ata/ahci.c
23368 --- linux-2.6.32.41/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23369 +++ linux-2.6.32.41/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23370 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23371 .sdev_attrs = ahci_sdev_attrs,
23372 };
23373
23374 -static struct ata_port_operations ahci_ops = {
23375 +static const struct ata_port_operations ahci_ops = {
23376 .inherits = &sata_pmp_port_ops,
23377
23378 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23379 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23380 .port_stop = ahci_port_stop,
23381 };
23382
23383 -static struct ata_port_operations ahci_vt8251_ops = {
23384 +static const struct ata_port_operations ahci_vt8251_ops = {
23385 .inherits = &ahci_ops,
23386 .hardreset = ahci_vt8251_hardreset,
23387 };
23388
23389 -static struct ata_port_operations ahci_p5wdh_ops = {
23390 +static const struct ata_port_operations ahci_p5wdh_ops = {
23391 .inherits = &ahci_ops,
23392 .hardreset = ahci_p5wdh_hardreset,
23393 };
23394
23395 -static struct ata_port_operations ahci_sb600_ops = {
23396 +static const struct ata_port_operations ahci_sb600_ops = {
23397 .inherits = &ahci_ops,
23398 .softreset = ahci_sb600_softreset,
23399 .pmp_softreset = ahci_sb600_softreset,
23400 diff -urNp linux-2.6.32.41/drivers/ata/ata_generic.c linux-2.6.32.41/drivers/ata/ata_generic.c
23401 --- linux-2.6.32.41/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23402 +++ linux-2.6.32.41/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23403 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23404 ATA_BMDMA_SHT(DRV_NAME),
23405 };
23406
23407 -static struct ata_port_operations generic_port_ops = {
23408 +static const struct ata_port_operations generic_port_ops = {
23409 .inherits = &ata_bmdma_port_ops,
23410 .cable_detect = ata_cable_unknown,
23411 .set_mode = generic_set_mode,
23412 diff -urNp linux-2.6.32.41/drivers/ata/ata_piix.c linux-2.6.32.41/drivers/ata/ata_piix.c
23413 --- linux-2.6.32.41/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23414 +++ linux-2.6.32.41/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23415 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23416 ATA_BMDMA_SHT(DRV_NAME),
23417 };
23418
23419 -static struct ata_port_operations piix_pata_ops = {
23420 +static const struct ata_port_operations piix_pata_ops = {
23421 .inherits = &ata_bmdma32_port_ops,
23422 .cable_detect = ata_cable_40wire,
23423 .set_piomode = piix_set_piomode,
23424 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23425 .prereset = piix_pata_prereset,
23426 };
23427
23428 -static struct ata_port_operations piix_vmw_ops = {
23429 +static const struct ata_port_operations piix_vmw_ops = {
23430 .inherits = &piix_pata_ops,
23431 .bmdma_status = piix_vmw_bmdma_status,
23432 };
23433
23434 -static struct ata_port_operations ich_pata_ops = {
23435 +static const struct ata_port_operations ich_pata_ops = {
23436 .inherits = &piix_pata_ops,
23437 .cable_detect = ich_pata_cable_detect,
23438 .set_dmamode = ich_set_dmamode,
23439 };
23440
23441 -static struct ata_port_operations piix_sata_ops = {
23442 +static const struct ata_port_operations piix_sata_ops = {
23443 .inherits = &ata_bmdma_port_ops,
23444 };
23445
23446 -static struct ata_port_operations piix_sidpr_sata_ops = {
23447 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23448 .inherits = &piix_sata_ops,
23449 .hardreset = sata_std_hardreset,
23450 .scr_read = piix_sidpr_scr_read,
23451 diff -urNp linux-2.6.32.41/drivers/ata/libata-acpi.c linux-2.6.32.41/drivers/ata/libata-acpi.c
23452 --- linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23453 +++ linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23454 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23455 ata_acpi_uevent(dev->link->ap, dev, event);
23456 }
23457
23458 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23459 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23460 .handler = ata_acpi_dev_notify_dock,
23461 .uevent = ata_acpi_dev_uevent,
23462 };
23463
23464 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23465 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23466 .handler = ata_acpi_ap_notify_dock,
23467 .uevent = ata_acpi_ap_uevent,
23468 };
23469 diff -urNp linux-2.6.32.41/drivers/ata/libata-core.c linux-2.6.32.41/drivers/ata/libata-core.c
23470 --- linux-2.6.32.41/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23471 +++ linux-2.6.32.41/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23472 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23473 struct ata_port *ap;
23474 unsigned int tag;
23475
23476 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23477 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23478 ap = qc->ap;
23479
23480 qc->flags = 0;
23481 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23482 struct ata_port *ap;
23483 struct ata_link *link;
23484
23485 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23486 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23487 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23488 ap = qc->ap;
23489 link = qc->dev->link;
23490 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23491 * LOCKING:
23492 * None.
23493 */
23494 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23495 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23496 {
23497 static DEFINE_SPINLOCK(lock);
23498 const struct ata_port_operations *cur;
23499 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23500 return;
23501
23502 spin_lock(&lock);
23503 + pax_open_kernel();
23504
23505 for (cur = ops->inherits; cur; cur = cur->inherits) {
23506 void **inherit = (void **)cur;
23507 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23508 if (IS_ERR(*pp))
23509 *pp = NULL;
23510
23511 - ops->inherits = NULL;
23512 + ((struct ata_port_operations *)ops)->inherits = NULL;
23513
23514 + pax_close_kernel();
23515 spin_unlock(&lock);
23516 }
23517
23518 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23519 */
23520 /* KILLME - the only user left is ipr */
23521 void ata_host_init(struct ata_host *host, struct device *dev,
23522 - unsigned long flags, struct ata_port_operations *ops)
23523 + unsigned long flags, const struct ata_port_operations *ops)
23524 {
23525 spin_lock_init(&host->lock);
23526 host->dev = dev;
23527 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23528 /* truly dummy */
23529 }
23530
23531 -struct ata_port_operations ata_dummy_port_ops = {
23532 +const struct ata_port_operations ata_dummy_port_ops = {
23533 .qc_prep = ata_noop_qc_prep,
23534 .qc_issue = ata_dummy_qc_issue,
23535 .error_handler = ata_dummy_error_handler,
23536 diff -urNp linux-2.6.32.41/drivers/ata/libata-eh.c linux-2.6.32.41/drivers/ata/libata-eh.c
23537 --- linux-2.6.32.41/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23538 +++ linux-2.6.32.41/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23539 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23540 {
23541 struct ata_link *link;
23542
23543 + pax_track_stack();
23544 +
23545 ata_for_each_link(link, ap, HOST_FIRST)
23546 ata_eh_link_report(link);
23547 }
23548 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23549 */
23550 void ata_std_error_handler(struct ata_port *ap)
23551 {
23552 - struct ata_port_operations *ops = ap->ops;
23553 + const struct ata_port_operations *ops = ap->ops;
23554 ata_reset_fn_t hardreset = ops->hardreset;
23555
23556 /* ignore built-in hardreset if SCR access is not available */
23557 diff -urNp linux-2.6.32.41/drivers/ata/libata-pmp.c linux-2.6.32.41/drivers/ata/libata-pmp.c
23558 --- linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23559 +++ linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23560 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23561 */
23562 static int sata_pmp_eh_recover(struct ata_port *ap)
23563 {
23564 - struct ata_port_operations *ops = ap->ops;
23565 + const struct ata_port_operations *ops = ap->ops;
23566 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23567 struct ata_link *pmp_link = &ap->link;
23568 struct ata_device *pmp_dev = pmp_link->device;
23569 diff -urNp linux-2.6.32.41/drivers/ata/pata_acpi.c linux-2.6.32.41/drivers/ata/pata_acpi.c
23570 --- linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23571 +++ linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23572 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23573 ATA_BMDMA_SHT(DRV_NAME),
23574 };
23575
23576 -static struct ata_port_operations pacpi_ops = {
23577 +static const struct ata_port_operations pacpi_ops = {
23578 .inherits = &ata_bmdma_port_ops,
23579 .qc_issue = pacpi_qc_issue,
23580 .cable_detect = pacpi_cable_detect,
23581 diff -urNp linux-2.6.32.41/drivers/ata/pata_ali.c linux-2.6.32.41/drivers/ata/pata_ali.c
23582 --- linux-2.6.32.41/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23583 +++ linux-2.6.32.41/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23584 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23585 * Port operations for PIO only ALi
23586 */
23587
23588 -static struct ata_port_operations ali_early_port_ops = {
23589 +static const struct ata_port_operations ali_early_port_ops = {
23590 .inherits = &ata_sff_port_ops,
23591 .cable_detect = ata_cable_40wire,
23592 .set_piomode = ali_set_piomode,
23593 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23594 * Port operations for DMA capable ALi without cable
23595 * detect
23596 */
23597 -static struct ata_port_operations ali_20_port_ops = {
23598 +static const struct ata_port_operations ali_20_port_ops = {
23599 .inherits = &ali_dma_base_ops,
23600 .cable_detect = ata_cable_40wire,
23601 .mode_filter = ali_20_filter,
23602 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23603 /*
23604 * Port operations for DMA capable ALi with cable detect
23605 */
23606 -static struct ata_port_operations ali_c2_port_ops = {
23607 +static const struct ata_port_operations ali_c2_port_ops = {
23608 .inherits = &ali_dma_base_ops,
23609 .check_atapi_dma = ali_check_atapi_dma,
23610 .cable_detect = ali_c2_cable_detect,
23611 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23612 /*
23613 * Port operations for DMA capable ALi with cable detect
23614 */
23615 -static struct ata_port_operations ali_c4_port_ops = {
23616 +static const struct ata_port_operations ali_c4_port_ops = {
23617 .inherits = &ali_dma_base_ops,
23618 .check_atapi_dma = ali_check_atapi_dma,
23619 .cable_detect = ali_c2_cable_detect,
23620 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23621 /*
23622 * Port operations for DMA capable ALi with cable detect and LBA48
23623 */
23624 -static struct ata_port_operations ali_c5_port_ops = {
23625 +static const struct ata_port_operations ali_c5_port_ops = {
23626 .inherits = &ali_dma_base_ops,
23627 .check_atapi_dma = ali_check_atapi_dma,
23628 .dev_config = ali_warn_atapi_dma,
23629 diff -urNp linux-2.6.32.41/drivers/ata/pata_amd.c linux-2.6.32.41/drivers/ata/pata_amd.c
23630 --- linux-2.6.32.41/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23631 +++ linux-2.6.32.41/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23632 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23633 .prereset = amd_pre_reset,
23634 };
23635
23636 -static struct ata_port_operations amd33_port_ops = {
23637 +static const struct ata_port_operations amd33_port_ops = {
23638 .inherits = &amd_base_port_ops,
23639 .cable_detect = ata_cable_40wire,
23640 .set_piomode = amd33_set_piomode,
23641 .set_dmamode = amd33_set_dmamode,
23642 };
23643
23644 -static struct ata_port_operations amd66_port_ops = {
23645 +static const struct ata_port_operations amd66_port_ops = {
23646 .inherits = &amd_base_port_ops,
23647 .cable_detect = ata_cable_unknown,
23648 .set_piomode = amd66_set_piomode,
23649 .set_dmamode = amd66_set_dmamode,
23650 };
23651
23652 -static struct ata_port_operations amd100_port_ops = {
23653 +static const struct ata_port_operations amd100_port_ops = {
23654 .inherits = &amd_base_port_ops,
23655 .cable_detect = ata_cable_unknown,
23656 .set_piomode = amd100_set_piomode,
23657 .set_dmamode = amd100_set_dmamode,
23658 };
23659
23660 -static struct ata_port_operations amd133_port_ops = {
23661 +static const struct ata_port_operations amd133_port_ops = {
23662 .inherits = &amd_base_port_ops,
23663 .cable_detect = amd_cable_detect,
23664 .set_piomode = amd133_set_piomode,
23665 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23666 .host_stop = nv_host_stop,
23667 };
23668
23669 -static struct ata_port_operations nv100_port_ops = {
23670 +static const struct ata_port_operations nv100_port_ops = {
23671 .inherits = &nv_base_port_ops,
23672 .set_piomode = nv100_set_piomode,
23673 .set_dmamode = nv100_set_dmamode,
23674 };
23675
23676 -static struct ata_port_operations nv133_port_ops = {
23677 +static const struct ata_port_operations nv133_port_ops = {
23678 .inherits = &nv_base_port_ops,
23679 .set_piomode = nv133_set_piomode,
23680 .set_dmamode = nv133_set_dmamode,
23681 diff -urNp linux-2.6.32.41/drivers/ata/pata_artop.c linux-2.6.32.41/drivers/ata/pata_artop.c
23682 --- linux-2.6.32.41/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23683 +++ linux-2.6.32.41/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23684 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23685 ATA_BMDMA_SHT(DRV_NAME),
23686 };
23687
23688 -static struct ata_port_operations artop6210_ops = {
23689 +static const struct ata_port_operations artop6210_ops = {
23690 .inherits = &ata_bmdma_port_ops,
23691 .cable_detect = ata_cable_40wire,
23692 .set_piomode = artop6210_set_piomode,
23693 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23694 .qc_defer = artop6210_qc_defer,
23695 };
23696
23697 -static struct ata_port_operations artop6260_ops = {
23698 +static const struct ata_port_operations artop6260_ops = {
23699 .inherits = &ata_bmdma_port_ops,
23700 .cable_detect = artop6260_cable_detect,
23701 .set_piomode = artop6260_set_piomode,
23702 diff -urNp linux-2.6.32.41/drivers/ata/pata_at32.c linux-2.6.32.41/drivers/ata/pata_at32.c
23703 --- linux-2.6.32.41/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23704 +++ linux-2.6.32.41/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23705 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23706 ATA_PIO_SHT(DRV_NAME),
23707 };
23708
23709 -static struct ata_port_operations at32_port_ops = {
23710 +static const struct ata_port_operations at32_port_ops = {
23711 .inherits = &ata_sff_port_ops,
23712 .cable_detect = ata_cable_40wire,
23713 .set_piomode = pata_at32_set_piomode,
23714 diff -urNp linux-2.6.32.41/drivers/ata/pata_at91.c linux-2.6.32.41/drivers/ata/pata_at91.c
23715 --- linux-2.6.32.41/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23716 +++ linux-2.6.32.41/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23717 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23718 ATA_PIO_SHT(DRV_NAME),
23719 };
23720
23721 -static struct ata_port_operations pata_at91_port_ops = {
23722 +static const struct ata_port_operations pata_at91_port_ops = {
23723 .inherits = &ata_sff_port_ops,
23724
23725 .sff_data_xfer = pata_at91_data_xfer_noirq,
23726 diff -urNp linux-2.6.32.41/drivers/ata/pata_atiixp.c linux-2.6.32.41/drivers/ata/pata_atiixp.c
23727 --- linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23728 +++ linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23729 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23730 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23731 };
23732
23733 -static struct ata_port_operations atiixp_port_ops = {
23734 +static const struct ata_port_operations atiixp_port_ops = {
23735 .inherits = &ata_bmdma_port_ops,
23736
23737 .qc_prep = ata_sff_dumb_qc_prep,
23738 diff -urNp linux-2.6.32.41/drivers/ata/pata_atp867x.c linux-2.6.32.41/drivers/ata/pata_atp867x.c
23739 --- linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23740 +++ linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23741 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23742 ATA_BMDMA_SHT(DRV_NAME),
23743 };
23744
23745 -static struct ata_port_operations atp867x_ops = {
23746 +static const struct ata_port_operations atp867x_ops = {
23747 .inherits = &ata_bmdma_port_ops,
23748 .cable_detect = atp867x_cable_detect,
23749 .set_piomode = atp867x_set_piomode,
23750 diff -urNp linux-2.6.32.41/drivers/ata/pata_bf54x.c linux-2.6.32.41/drivers/ata/pata_bf54x.c
23751 --- linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23752 +++ linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23753 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23754 .dma_boundary = ATA_DMA_BOUNDARY,
23755 };
23756
23757 -static struct ata_port_operations bfin_pata_ops = {
23758 +static const struct ata_port_operations bfin_pata_ops = {
23759 .inherits = &ata_sff_port_ops,
23760
23761 .set_piomode = bfin_set_piomode,
23762 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd640.c linux-2.6.32.41/drivers/ata/pata_cmd640.c
23763 --- linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23764 +++ linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23765 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23766 ATA_BMDMA_SHT(DRV_NAME),
23767 };
23768
23769 -static struct ata_port_operations cmd640_port_ops = {
23770 +static const struct ata_port_operations cmd640_port_ops = {
23771 .inherits = &ata_bmdma_port_ops,
23772 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23773 .sff_data_xfer = ata_sff_data_xfer_noirq,
23774 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd64x.c linux-2.6.32.41/drivers/ata/pata_cmd64x.c
23775 --- linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-03-27 14:31:47.000000000 -0400
23776 +++ linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-04-17 15:56:46.000000000 -0400
23777 @@ -275,18 +275,18 @@ static const struct ata_port_operations
23778 .set_dmamode = cmd64x_set_dmamode,
23779 };
23780
23781 -static struct ata_port_operations cmd64x_port_ops = {
23782 +static const struct ata_port_operations cmd64x_port_ops = {
23783 .inherits = &cmd64x_base_ops,
23784 .cable_detect = ata_cable_40wire,
23785 };
23786
23787 -static struct ata_port_operations cmd646r1_port_ops = {
23788 +static const struct ata_port_operations cmd646r1_port_ops = {
23789 .inherits = &cmd64x_base_ops,
23790 .bmdma_stop = cmd646r1_bmdma_stop,
23791 .cable_detect = ata_cable_40wire,
23792 };
23793
23794 -static struct ata_port_operations cmd648_port_ops = {
23795 +static const struct ata_port_operations cmd648_port_ops = {
23796 .inherits = &cmd64x_base_ops,
23797 .bmdma_stop = cmd648_bmdma_stop,
23798 .cable_detect = cmd648_cable_detect,
23799 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5520.c linux-2.6.32.41/drivers/ata/pata_cs5520.c
23800 --- linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23801 +++ linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23802 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23803 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23804 };
23805
23806 -static struct ata_port_operations cs5520_port_ops = {
23807 +static const struct ata_port_operations cs5520_port_ops = {
23808 .inherits = &ata_bmdma_port_ops,
23809 .qc_prep = ata_sff_dumb_qc_prep,
23810 .cable_detect = ata_cable_40wire,
23811 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5530.c linux-2.6.32.41/drivers/ata/pata_cs5530.c
23812 --- linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23813 +++ linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23814 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23815 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23816 };
23817
23818 -static struct ata_port_operations cs5530_port_ops = {
23819 +static const struct ata_port_operations cs5530_port_ops = {
23820 .inherits = &ata_bmdma_port_ops,
23821
23822 .qc_prep = ata_sff_dumb_qc_prep,
23823 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5535.c linux-2.6.32.41/drivers/ata/pata_cs5535.c
23824 --- linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23825 +++ linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23826 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23827 ATA_BMDMA_SHT(DRV_NAME),
23828 };
23829
23830 -static struct ata_port_operations cs5535_port_ops = {
23831 +static const struct ata_port_operations cs5535_port_ops = {
23832 .inherits = &ata_bmdma_port_ops,
23833 .cable_detect = cs5535_cable_detect,
23834 .set_piomode = cs5535_set_piomode,
23835 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5536.c linux-2.6.32.41/drivers/ata/pata_cs5536.c
23836 --- linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23837 +++ linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23838 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23839 ATA_BMDMA_SHT(DRV_NAME),
23840 };
23841
23842 -static struct ata_port_operations cs5536_port_ops = {
23843 +static const struct ata_port_operations cs5536_port_ops = {
23844 .inherits = &ata_bmdma_port_ops,
23845 .cable_detect = cs5536_cable_detect,
23846 .set_piomode = cs5536_set_piomode,
23847 diff -urNp linux-2.6.32.41/drivers/ata/pata_cypress.c linux-2.6.32.41/drivers/ata/pata_cypress.c
23848 --- linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23849 +++ linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23850 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23851 ATA_BMDMA_SHT(DRV_NAME),
23852 };
23853
23854 -static struct ata_port_operations cy82c693_port_ops = {
23855 +static const struct ata_port_operations cy82c693_port_ops = {
23856 .inherits = &ata_bmdma_port_ops,
23857 .cable_detect = ata_cable_40wire,
23858 .set_piomode = cy82c693_set_piomode,
23859 diff -urNp linux-2.6.32.41/drivers/ata/pata_efar.c linux-2.6.32.41/drivers/ata/pata_efar.c
23860 --- linux-2.6.32.41/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23861 +++ linux-2.6.32.41/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23862 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23863 ATA_BMDMA_SHT(DRV_NAME),
23864 };
23865
23866 -static struct ata_port_operations efar_ops = {
23867 +static const struct ata_port_operations efar_ops = {
23868 .inherits = &ata_bmdma_port_ops,
23869 .cable_detect = efar_cable_detect,
23870 .set_piomode = efar_set_piomode,
23871 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt366.c linux-2.6.32.41/drivers/ata/pata_hpt366.c
23872 --- linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-03-27 14:31:47.000000000 -0400
23873 +++ linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-04-17 15:56:46.000000000 -0400
23874 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23875 * Configuration for HPT366/68
23876 */
23877
23878 -static struct ata_port_operations hpt366_port_ops = {
23879 +static const struct ata_port_operations hpt366_port_ops = {
23880 .inherits = &ata_bmdma_port_ops,
23881 .cable_detect = hpt36x_cable_detect,
23882 .mode_filter = hpt366_filter,
23883 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt37x.c linux-2.6.32.41/drivers/ata/pata_hpt37x.c
23884 --- linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-03-27 14:31:47.000000000 -0400
23885 +++ linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-04-17 15:56:46.000000000 -0400
23886 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23887 * Configuration for HPT370
23888 */
23889
23890 -static struct ata_port_operations hpt370_port_ops = {
23891 +static const struct ata_port_operations hpt370_port_ops = {
23892 .inherits = &ata_bmdma_port_ops,
23893
23894 .bmdma_stop = hpt370_bmdma_stop,
23895 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23896 * Configuration for HPT370A. Close to 370 but less filters
23897 */
23898
23899 -static struct ata_port_operations hpt370a_port_ops = {
23900 +static const struct ata_port_operations hpt370a_port_ops = {
23901 .inherits = &hpt370_port_ops,
23902 .mode_filter = hpt370a_filter,
23903 };
23904 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23905 * and DMA mode setting functionality.
23906 */
23907
23908 -static struct ata_port_operations hpt372_port_ops = {
23909 +static const struct ata_port_operations hpt372_port_ops = {
23910 .inherits = &ata_bmdma_port_ops,
23911
23912 .bmdma_stop = hpt37x_bmdma_stop,
23913 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23914 * but we have a different cable detection procedure for function 1.
23915 */
23916
23917 -static struct ata_port_operations hpt374_fn1_port_ops = {
23918 +static const struct ata_port_operations hpt374_fn1_port_ops = {
23919 .inherits = &hpt372_port_ops,
23920 .prereset = hpt374_fn1_pre_reset,
23921 };
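
Editorial note, not part of the patch: the HPT37x hunks above show how these tables are built. A child table such as hpt370a_port_ops names a parent via .inherits and overrides only mode_filter, and hpt374_fn1_port_ops overrides only prereset. Mainline libata resolves the .inherits chain by writing the missing slots back into the ops structure at host registration, so constifying the templates only works if that finalization step is also adjusted for read-only memory; that adjustment is not visible in this excerpt and is assumed to be handled elsewhere in the patch. The sketch below, with invented names, pictures the same inheritance in a purely read-only way by flattening the chain into a separate writable copy.

#include <stdio.h>

struct demo_ops {
	const struct demo_ops *inherits;	/* parent template, or NULL */
	void (*freeze)(void);
	void (*thaw)(void);
};

static void base_freeze(void)  { puts("base freeze"); }
static void base_thaw(void)    { puts("base thaw"); }
static void quirk_freeze(void) { puts("quirky controller freeze"); }

static const struct demo_ops base_ops = {
	.freeze = base_freeze,
	.thaw   = base_thaw,
};

/* child template overrides only what it needs, like hpt370a_port_ops above */
static const struct demo_ops quirk_ops = {
	.inherits = &base_ops,
	.freeze   = quirk_freeze,
};

/* flatten the chain into 'out'; the const templates are only ever read */
static void resolve(struct demo_ops *out, const struct demo_ops *tmpl)
{
	if (tmpl->inherits)
		resolve(out, tmpl->inherits);
	if (tmpl->freeze)
		out->freeze = tmpl->freeze;
	if (tmpl->thaw)
		out->thaw = tmpl->thaw;
}

int main(void)
{
	struct demo_ops ops = { 0 };

	resolve(&ops, &quirk_ops);
	ops.freeze();	/* quirky controller freeze */
	ops.thaw();	/* base thaw, inherited unchanged */
	return 0;
}
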
23922 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c
23923 --- linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-03-27 14:31:47.000000000 -0400
23924 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-04-17 15:56:46.000000000 -0400
23925 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
23926 * Configuration for HPT3x2n.
23927 */
23928
23929 -static struct ata_port_operations hpt3x2n_port_ops = {
23930 +static const struct ata_port_operations hpt3x2n_port_ops = {
23931 .inherits = &ata_bmdma_port_ops,
23932
23933 .bmdma_stop = hpt3x2n_bmdma_stop,
23934 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x3.c linux-2.6.32.41/drivers/ata/pata_hpt3x3.c
23935 --- linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
23936 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
23937 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
23938 ATA_BMDMA_SHT(DRV_NAME),
23939 };
23940
23941 -static struct ata_port_operations hpt3x3_port_ops = {
23942 +static const struct ata_port_operations hpt3x3_port_ops = {
23943 .inherits = &ata_bmdma_port_ops,
23944 .cable_detect = ata_cable_40wire,
23945 .set_piomode = hpt3x3_set_piomode,
23946 diff -urNp linux-2.6.32.41/drivers/ata/pata_icside.c linux-2.6.32.41/drivers/ata/pata_icside.c
23947 --- linux-2.6.32.41/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
23948 +++ linux-2.6.32.41/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
23949 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
23950 }
23951 }
23952
23953 -static struct ata_port_operations pata_icside_port_ops = {
23954 +static const struct ata_port_operations pata_icside_port_ops = {
23955 .inherits = &ata_sff_port_ops,
23956 /* no need to build any PRD tables for DMA */
23957 .qc_prep = ata_noop_qc_prep,
23958 diff -urNp linux-2.6.32.41/drivers/ata/pata_isapnp.c linux-2.6.32.41/drivers/ata/pata_isapnp.c
23959 --- linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
23960 +++ linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
23961 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
23962 ATA_PIO_SHT(DRV_NAME),
23963 };
23964
23965 -static struct ata_port_operations isapnp_port_ops = {
23966 +static const struct ata_port_operations isapnp_port_ops = {
23967 .inherits = &ata_sff_port_ops,
23968 .cable_detect = ata_cable_40wire,
23969 };
23970
23971 -static struct ata_port_operations isapnp_noalt_port_ops = {
23972 +static const struct ata_port_operations isapnp_noalt_port_ops = {
23973 .inherits = &ata_sff_port_ops,
23974 .cable_detect = ata_cable_40wire,
23975 /* No altstatus so we don't want to use the lost interrupt poll */
23976 diff -urNp linux-2.6.32.41/drivers/ata/pata_it8213.c linux-2.6.32.41/drivers/ata/pata_it8213.c
23977 --- linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
23978 +++ linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
23979 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
23980 };
23981
23982
23983 -static struct ata_port_operations it8213_ops = {
23984 +static const struct ata_port_operations it8213_ops = {
23985 .inherits = &ata_bmdma_port_ops,
23986 .cable_detect = it8213_cable_detect,
23987 .set_piomode = it8213_set_piomode,
23988 diff -urNp linux-2.6.32.41/drivers/ata/pata_it821x.c linux-2.6.32.41/drivers/ata/pata_it821x.c
23989 --- linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
23990 +++ linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
23991 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
23992 ATA_BMDMA_SHT(DRV_NAME),
23993 };
23994
23995 -static struct ata_port_operations it821x_smart_port_ops = {
23996 +static const struct ata_port_operations it821x_smart_port_ops = {
23997 .inherits = &ata_bmdma_port_ops,
23998
23999 .check_atapi_dma= it821x_check_atapi_dma,
24000 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24001 .port_start = it821x_port_start,
24002 };
24003
24004 -static struct ata_port_operations it821x_passthru_port_ops = {
24005 +static const struct ata_port_operations it821x_passthru_port_ops = {
24006 .inherits = &ata_bmdma_port_ops,
24007
24008 .check_atapi_dma= it821x_check_atapi_dma,
24009 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24010 .port_start = it821x_port_start,
24011 };
24012
24013 -static struct ata_port_operations it821x_rdc_port_ops = {
24014 +static const struct ata_port_operations it821x_rdc_port_ops = {
24015 .inherits = &ata_bmdma_port_ops,
24016
24017 .check_atapi_dma= it821x_check_atapi_dma,
24018 diff -urNp linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c
24019 --- linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24020 +++ linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24021 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24022 ATA_PIO_SHT(DRV_NAME),
24023 };
24024
24025 -static struct ata_port_operations ixp4xx_port_ops = {
24026 +static const struct ata_port_operations ixp4xx_port_ops = {
24027 .inherits = &ata_sff_port_ops,
24028 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24029 .cable_detect = ata_cable_40wire,
24030 diff -urNp linux-2.6.32.41/drivers/ata/pata_jmicron.c linux-2.6.32.41/drivers/ata/pata_jmicron.c
24031 --- linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24032 +++ linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24033 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24034 ATA_BMDMA_SHT(DRV_NAME),
24035 };
24036
24037 -static struct ata_port_operations jmicron_ops = {
24038 +static const struct ata_port_operations jmicron_ops = {
24039 .inherits = &ata_bmdma_port_ops,
24040 .prereset = jmicron_pre_reset,
24041 };
24042 diff -urNp linux-2.6.32.41/drivers/ata/pata_legacy.c linux-2.6.32.41/drivers/ata/pata_legacy.c
24043 --- linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24044 +++ linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24045 @@ -106,7 +106,7 @@ struct legacy_probe {
24046
24047 struct legacy_controller {
24048 const char *name;
24049 - struct ata_port_operations *ops;
24050 + const struct ata_port_operations *ops;
24051 unsigned int pio_mask;
24052 unsigned int flags;
24053 unsigned int pflags;
24054 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24055 * pio_mask as well.
24056 */
24057
24058 -static struct ata_port_operations simple_port_ops = {
24059 +static const struct ata_port_operations simple_port_ops = {
24060 .inherits = &legacy_base_port_ops,
24061 .sff_data_xfer = ata_sff_data_xfer_noirq,
24062 };
24063
24064 -static struct ata_port_operations legacy_port_ops = {
24065 +static const struct ata_port_operations legacy_port_ops = {
24066 .inherits = &legacy_base_port_ops,
24067 .sff_data_xfer = ata_sff_data_xfer_noirq,
24068 .set_mode = legacy_set_mode,
24069 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24070 return buflen;
24071 }
24072
24073 -static struct ata_port_operations pdc20230_port_ops = {
24074 +static const struct ata_port_operations pdc20230_port_ops = {
24075 .inherits = &legacy_base_port_ops,
24076 .set_piomode = pdc20230_set_piomode,
24077 .sff_data_xfer = pdc_data_xfer_vlb,
24078 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24079 ioread8(ap->ioaddr.status_addr);
24080 }
24081
24082 -static struct ata_port_operations ht6560a_port_ops = {
24083 +static const struct ata_port_operations ht6560a_port_ops = {
24084 .inherits = &legacy_base_port_ops,
24085 .set_piomode = ht6560a_set_piomode,
24086 };
24087 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24088 ioread8(ap->ioaddr.status_addr);
24089 }
24090
24091 -static struct ata_port_operations ht6560b_port_ops = {
24092 +static const struct ata_port_operations ht6560b_port_ops = {
24093 .inherits = &legacy_base_port_ops,
24094 .set_piomode = ht6560b_set_piomode,
24095 };
24096 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24097 }
24098
24099
24100 -static struct ata_port_operations opti82c611a_port_ops = {
24101 +static const struct ata_port_operations opti82c611a_port_ops = {
24102 .inherits = &legacy_base_port_ops,
24103 .set_piomode = opti82c611a_set_piomode,
24104 };
24105 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24106 return ata_sff_qc_issue(qc);
24107 }
24108
24109 -static struct ata_port_operations opti82c46x_port_ops = {
24110 +static const struct ata_port_operations opti82c46x_port_ops = {
24111 .inherits = &legacy_base_port_ops,
24112 .set_piomode = opti82c46x_set_piomode,
24113 .qc_issue = opti82c46x_qc_issue,
24114 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24115 return 0;
24116 }
24117
24118 -static struct ata_port_operations qdi6500_port_ops = {
24119 +static const struct ata_port_operations qdi6500_port_ops = {
24120 .inherits = &legacy_base_port_ops,
24121 .set_piomode = qdi6500_set_piomode,
24122 .qc_issue = qdi_qc_issue,
24123 .sff_data_xfer = vlb32_data_xfer,
24124 };
24125
24126 -static struct ata_port_operations qdi6580_port_ops = {
24127 +static const struct ata_port_operations qdi6580_port_ops = {
24128 .inherits = &legacy_base_port_ops,
24129 .set_piomode = qdi6580_set_piomode,
24130 .sff_data_xfer = vlb32_data_xfer,
24131 };
24132
24133 -static struct ata_port_operations qdi6580dp_port_ops = {
24134 +static const struct ata_port_operations qdi6580dp_port_ops = {
24135 .inherits = &legacy_base_port_ops,
24136 .set_piomode = qdi6580dp_set_piomode,
24137 .sff_data_xfer = vlb32_data_xfer,
24138 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24139 return 0;
24140 }
24141
24142 -static struct ata_port_operations winbond_port_ops = {
24143 +static const struct ata_port_operations winbond_port_ops = {
24144 .inherits = &legacy_base_port_ops,
24145 .set_piomode = winbond_set_piomode,
24146 .sff_data_xfer = vlb32_data_xfer,
24147 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24148 int pio_modes = controller->pio_mask;
24149 unsigned long io = probe->port;
24150 u32 mask = (1 << probe->slot);
24151 - struct ata_port_operations *ops = controller->ops;
24152 + const struct ata_port_operations *ops = controller->ops;
24153 struct legacy_data *ld = &legacy_data[probe->slot];
24154 struct ata_host *host = NULL;
24155 struct ata_port *ap;
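
Editorial note, not part of the patch: pata_legacy.c is one of the few drivers where the const qualifier has to propagate beyond the tables themselves. struct legacy_controller stores a pointer to a port-operations table and legacy_init_one() keeps a local copy of it, so both become pointer-to-const; taking the address of a const object into a plain pointer would discard the qualifier. A minimal sketch with invented names (the same pointer change appears again in pata_pcmcia.c further down):

struct demo_ops {
	int (*cable_detect)(int port);
};

static int demo_cable_detect(int port) { return 40; }

static const struct demo_ops demo_port_ops = {
	.cable_detect = demo_cable_detect,
};

struct demo_controller {
	const char *name;
	const struct demo_ops *ops;	/* must be pointer-to-const now */
};

static struct demo_controller ctrl = {
	.name = "demo",
	.ops  = &demo_port_ops,		/* fine: const pointer to const object */
};

/*
 * struct demo_ops *bad = &demo_port_ops;  -- would draw "initialization
 * discards 'const' qualifier", and a forced write through it would fault
 * once the object really lives in .rodata.
 */
int main(void)
{
	return ctrl.ops->cable_detect(0) == 40 ? 0 : 1;
}
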
24156 diff -urNp linux-2.6.32.41/drivers/ata/pata_marvell.c linux-2.6.32.41/drivers/ata/pata_marvell.c
24157 --- linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24158 +++ linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24159 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24160 ATA_BMDMA_SHT(DRV_NAME),
24161 };
24162
24163 -static struct ata_port_operations marvell_ops = {
24164 +static const struct ata_port_operations marvell_ops = {
24165 .inherits = &ata_bmdma_port_ops,
24166 .cable_detect = marvell_cable_detect,
24167 .prereset = marvell_pre_reset,
24168 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpc52xx.c linux-2.6.32.41/drivers/ata/pata_mpc52xx.c
24169 --- linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24170 +++ linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24171 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24172 ATA_PIO_SHT(DRV_NAME),
24173 };
24174
24175 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24176 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24177 .inherits = &ata_bmdma_port_ops,
24178 .sff_dev_select = mpc52xx_ata_dev_select,
24179 .set_piomode = mpc52xx_ata_set_piomode,
24180 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpiix.c linux-2.6.32.41/drivers/ata/pata_mpiix.c
24181 --- linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24182 +++ linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24183 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24184 ATA_PIO_SHT(DRV_NAME),
24185 };
24186
24187 -static struct ata_port_operations mpiix_port_ops = {
24188 +static const struct ata_port_operations mpiix_port_ops = {
24189 .inherits = &ata_sff_port_ops,
24190 .qc_issue = mpiix_qc_issue,
24191 .cable_detect = ata_cable_40wire,
24192 diff -urNp linux-2.6.32.41/drivers/ata/pata_netcell.c linux-2.6.32.41/drivers/ata/pata_netcell.c
24193 --- linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24194 +++ linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24195 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24196 ATA_BMDMA_SHT(DRV_NAME),
24197 };
24198
24199 -static struct ata_port_operations netcell_ops = {
24200 +static const struct ata_port_operations netcell_ops = {
24201 .inherits = &ata_bmdma_port_ops,
24202 .cable_detect = ata_cable_80wire,
24203 .read_id = netcell_read_id,
24204 diff -urNp linux-2.6.32.41/drivers/ata/pata_ninja32.c linux-2.6.32.41/drivers/ata/pata_ninja32.c
24205 --- linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24206 +++ linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24207 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24208 ATA_BMDMA_SHT(DRV_NAME),
24209 };
24210
24211 -static struct ata_port_operations ninja32_port_ops = {
24212 +static const struct ata_port_operations ninja32_port_ops = {
24213 .inherits = &ata_bmdma_port_ops,
24214 .sff_dev_select = ninja32_dev_select,
24215 .cable_detect = ata_cable_40wire,
24216 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87410.c linux-2.6.32.41/drivers/ata/pata_ns87410.c
24217 --- linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24218 +++ linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24219 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24220 ATA_PIO_SHT(DRV_NAME),
24221 };
24222
24223 -static struct ata_port_operations ns87410_port_ops = {
24224 +static const struct ata_port_operations ns87410_port_ops = {
24225 .inherits = &ata_sff_port_ops,
24226 .qc_issue = ns87410_qc_issue,
24227 .cable_detect = ata_cable_40wire,
24228 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87415.c linux-2.6.32.41/drivers/ata/pata_ns87415.c
24229 --- linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24230 +++ linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24231 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24232 }
24233 #endif /* 87560 SuperIO Support */
24234
24235 -static struct ata_port_operations ns87415_pata_ops = {
24236 +static const struct ata_port_operations ns87415_pata_ops = {
24237 .inherits = &ata_bmdma_port_ops,
24238
24239 .check_atapi_dma = ns87415_check_atapi_dma,
24240 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24241 };
24242
24243 #if defined(CONFIG_SUPERIO)
24244 -static struct ata_port_operations ns87560_pata_ops = {
24245 +static const struct ata_port_operations ns87560_pata_ops = {
24246 .inherits = &ns87415_pata_ops,
24247 .sff_tf_read = ns87560_tf_read,
24248 .sff_check_status = ns87560_check_status,
24249 diff -urNp linux-2.6.32.41/drivers/ata/pata_octeon_cf.c linux-2.6.32.41/drivers/ata/pata_octeon_cf.c
24250 --- linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24251 +++ linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24252 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24253 return 0;
24254 }
24255
24256 +/* cannot be const */
24257 static struct ata_port_operations octeon_cf_ops = {
24258 .inherits = &ata_sff_port_ops,
24259 .check_atapi_dma = octeon_cf_check_atapi_dma,
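
Editorial note, not part of the patch: pata_octeon_cf.c is the one driver in this run that is deliberately left alone, with a "cannot be const" comment added instead. The likely reason, and it is an assumption here, is that the driver assigns some of these callbacks at probe time once it knows how the CompactFlash interface is wired, so the table has to remain writable. A sketch of that pattern with invented names:

#include <stdio.h>

struct demo_ops {
	unsigned int (*data_xfer)(void *buf, unsigned int len);
};

static unsigned int xfer_pio(void *buf, unsigned int len) { return len; }
static unsigned int xfer_dma(void *buf, unsigned int len) { return len; }

/* cannot be const: probe() assigns a callback once the hardware is known */
static struct demo_ops demo_cf_ops = {
	.data_xfer = xfer_pio,
};

static void probe(int has_dma)
{
	if (has_dma)
		demo_cf_ops.data_xfer = xfer_dma;	/* run-time write */
}

int main(void)
{
	probe(1);
	printf("%u\n", demo_cf_ops.data_xfer(NULL, 512));
	return 0;
}
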
24260 diff -urNp linux-2.6.32.41/drivers/ata/pata_oldpiix.c linux-2.6.32.41/drivers/ata/pata_oldpiix.c
24261 --- linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24262 +++ linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24263 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24264 ATA_BMDMA_SHT(DRV_NAME),
24265 };
24266
24267 -static struct ata_port_operations oldpiix_pata_ops = {
24268 +static const struct ata_port_operations oldpiix_pata_ops = {
24269 .inherits = &ata_bmdma_port_ops,
24270 .qc_issue = oldpiix_qc_issue,
24271 .cable_detect = ata_cable_40wire,
24272 diff -urNp linux-2.6.32.41/drivers/ata/pata_opti.c linux-2.6.32.41/drivers/ata/pata_opti.c
24273 --- linux-2.6.32.41/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24274 +++ linux-2.6.32.41/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24275 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24276 ATA_PIO_SHT(DRV_NAME),
24277 };
24278
24279 -static struct ata_port_operations opti_port_ops = {
24280 +static const struct ata_port_operations opti_port_ops = {
24281 .inherits = &ata_sff_port_ops,
24282 .cable_detect = ata_cable_40wire,
24283 .set_piomode = opti_set_piomode,
24284 diff -urNp linux-2.6.32.41/drivers/ata/pata_optidma.c linux-2.6.32.41/drivers/ata/pata_optidma.c
24285 --- linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24286 +++ linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24287 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24288 ATA_BMDMA_SHT(DRV_NAME),
24289 };
24290
24291 -static struct ata_port_operations optidma_port_ops = {
24292 +static const struct ata_port_operations optidma_port_ops = {
24293 .inherits = &ata_bmdma_port_ops,
24294 .cable_detect = ata_cable_40wire,
24295 .set_piomode = optidma_set_pio_mode,
24296 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24297 .prereset = optidma_pre_reset,
24298 };
24299
24300 -static struct ata_port_operations optiplus_port_ops = {
24301 +static const struct ata_port_operations optiplus_port_ops = {
24302 .inherits = &optidma_port_ops,
24303 .set_piomode = optiplus_set_pio_mode,
24304 .set_dmamode = optiplus_set_dma_mode,
24305 diff -urNp linux-2.6.32.41/drivers/ata/pata_palmld.c linux-2.6.32.41/drivers/ata/pata_palmld.c
24306 --- linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24307 +++ linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24308 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24309 ATA_PIO_SHT(DRV_NAME),
24310 };
24311
24312 -static struct ata_port_operations palmld_port_ops = {
24313 +static const struct ata_port_operations palmld_port_ops = {
24314 .inherits = &ata_sff_port_ops,
24315 .sff_data_xfer = ata_sff_data_xfer_noirq,
24316 .cable_detect = ata_cable_40wire,
24317 diff -urNp linux-2.6.32.41/drivers/ata/pata_pcmcia.c linux-2.6.32.41/drivers/ata/pata_pcmcia.c
24318 --- linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24319 +++ linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24320 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24321 ATA_PIO_SHT(DRV_NAME),
24322 };
24323
24324 -static struct ata_port_operations pcmcia_port_ops = {
24325 +static const struct ata_port_operations pcmcia_port_ops = {
24326 .inherits = &ata_sff_port_ops,
24327 .sff_data_xfer = ata_sff_data_xfer_noirq,
24328 .cable_detect = ata_cable_40wire,
24329 .set_mode = pcmcia_set_mode,
24330 };
24331
24332 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24333 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24334 .inherits = &ata_sff_port_ops,
24335 .sff_data_xfer = ata_data_xfer_8bit,
24336 .cable_detect = ata_cable_40wire,
24337 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24338 unsigned long io_base, ctl_base;
24339 void __iomem *io_addr, *ctl_addr;
24340 int n_ports = 1;
24341 - struct ata_port_operations *ops = &pcmcia_port_ops;
24342 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24343
24344 info = kzalloc(sizeof(*info), GFP_KERNEL);
24345 if (info == NULL)
24346 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc2027x.c linux-2.6.32.41/drivers/ata/pata_pdc2027x.c
24347 --- linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24348 +++ linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24349 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24350 ATA_BMDMA_SHT(DRV_NAME),
24351 };
24352
24353 -static struct ata_port_operations pdc2027x_pata100_ops = {
24354 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24355 .inherits = &ata_bmdma_port_ops,
24356 .check_atapi_dma = pdc2027x_check_atapi_dma,
24357 .cable_detect = pdc2027x_cable_detect,
24358 .prereset = pdc2027x_prereset,
24359 };
24360
24361 -static struct ata_port_operations pdc2027x_pata133_ops = {
24362 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24363 .inherits = &pdc2027x_pata100_ops,
24364 .mode_filter = pdc2027x_mode_filter,
24365 .set_piomode = pdc2027x_set_piomode,
24366 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c
24367 --- linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24368 +++ linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24369 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24370 ATA_BMDMA_SHT(DRV_NAME),
24371 };
24372
24373 -static struct ata_port_operations pdc2024x_port_ops = {
24374 +static const struct ata_port_operations pdc2024x_port_ops = {
24375 .inherits = &ata_bmdma_port_ops,
24376
24377 .cable_detect = ata_cable_40wire,
24378 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24379 .sff_exec_command = pdc202xx_exec_command,
24380 };
24381
24382 -static struct ata_port_operations pdc2026x_port_ops = {
24383 +static const struct ata_port_operations pdc2026x_port_ops = {
24384 .inherits = &pdc2024x_port_ops,
24385
24386 .check_atapi_dma = pdc2026x_check_atapi_dma,
24387 diff -urNp linux-2.6.32.41/drivers/ata/pata_platform.c linux-2.6.32.41/drivers/ata/pata_platform.c
24388 --- linux-2.6.32.41/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24389 +++ linux-2.6.32.41/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24390 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24391 ATA_PIO_SHT(DRV_NAME),
24392 };
24393
24394 -static struct ata_port_operations pata_platform_port_ops = {
24395 +static const struct ata_port_operations pata_platform_port_ops = {
24396 .inherits = &ata_sff_port_ops,
24397 .sff_data_xfer = ata_sff_data_xfer_noirq,
24398 .cable_detect = ata_cable_unknown,
24399 diff -urNp linux-2.6.32.41/drivers/ata/pata_qdi.c linux-2.6.32.41/drivers/ata/pata_qdi.c
24400 --- linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24401 +++ linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24402 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24403 ATA_PIO_SHT(DRV_NAME),
24404 };
24405
24406 -static struct ata_port_operations qdi6500_port_ops = {
24407 +static const struct ata_port_operations qdi6500_port_ops = {
24408 .inherits = &ata_sff_port_ops,
24409 .qc_issue = qdi_qc_issue,
24410 .sff_data_xfer = qdi_data_xfer,
24411 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24412 .set_piomode = qdi6500_set_piomode,
24413 };
24414
24415 -static struct ata_port_operations qdi6580_port_ops = {
24416 +static const struct ata_port_operations qdi6580_port_ops = {
24417 .inherits = &qdi6500_port_ops,
24418 .set_piomode = qdi6580_set_piomode,
24419 };
24420 diff -urNp linux-2.6.32.41/drivers/ata/pata_radisys.c linux-2.6.32.41/drivers/ata/pata_radisys.c
24421 --- linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24422 +++ linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24423 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24424 ATA_BMDMA_SHT(DRV_NAME),
24425 };
24426
24427 -static struct ata_port_operations radisys_pata_ops = {
24428 +static const struct ata_port_operations radisys_pata_ops = {
24429 .inherits = &ata_bmdma_port_ops,
24430 .qc_issue = radisys_qc_issue,
24431 .cable_detect = ata_cable_unknown,
24432 diff -urNp linux-2.6.32.41/drivers/ata/pata_rb532_cf.c linux-2.6.32.41/drivers/ata/pata_rb532_cf.c
24433 --- linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24434 +++ linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24435 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24436 return IRQ_HANDLED;
24437 }
24438
24439 -static struct ata_port_operations rb532_pata_port_ops = {
24440 +static const struct ata_port_operations rb532_pata_port_ops = {
24441 .inherits = &ata_sff_port_ops,
24442 .sff_data_xfer = ata_sff_data_xfer32,
24443 };
24444 diff -urNp linux-2.6.32.41/drivers/ata/pata_rdc.c linux-2.6.32.41/drivers/ata/pata_rdc.c
24445 --- linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24446 +++ linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24447 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24448 pci_write_config_byte(dev, 0x48, udma_enable);
24449 }
24450
24451 -static struct ata_port_operations rdc_pata_ops = {
24452 +static const struct ata_port_operations rdc_pata_ops = {
24453 .inherits = &ata_bmdma32_port_ops,
24454 .cable_detect = rdc_pata_cable_detect,
24455 .set_piomode = rdc_set_piomode,
24456 diff -urNp linux-2.6.32.41/drivers/ata/pata_rz1000.c linux-2.6.32.41/drivers/ata/pata_rz1000.c
24457 --- linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24458 +++ linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24459 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24460 ATA_PIO_SHT(DRV_NAME),
24461 };
24462
24463 -static struct ata_port_operations rz1000_port_ops = {
24464 +static const struct ata_port_operations rz1000_port_ops = {
24465 .inherits = &ata_sff_port_ops,
24466 .cable_detect = ata_cable_40wire,
24467 .set_mode = rz1000_set_mode,
24468 diff -urNp linux-2.6.32.41/drivers/ata/pata_sc1200.c linux-2.6.32.41/drivers/ata/pata_sc1200.c
24469 --- linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24470 +++ linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24471 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24472 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24473 };
24474
24475 -static struct ata_port_operations sc1200_port_ops = {
24476 +static const struct ata_port_operations sc1200_port_ops = {
24477 .inherits = &ata_bmdma_port_ops,
24478 .qc_prep = ata_sff_dumb_qc_prep,
24479 .qc_issue = sc1200_qc_issue,
24480 diff -urNp linux-2.6.32.41/drivers/ata/pata_scc.c linux-2.6.32.41/drivers/ata/pata_scc.c
24481 --- linux-2.6.32.41/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24482 +++ linux-2.6.32.41/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24483 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24484 ATA_BMDMA_SHT(DRV_NAME),
24485 };
24486
24487 -static struct ata_port_operations scc_pata_ops = {
24488 +static const struct ata_port_operations scc_pata_ops = {
24489 .inherits = &ata_bmdma_port_ops,
24490
24491 .set_piomode = scc_set_piomode,
24492 diff -urNp linux-2.6.32.41/drivers/ata/pata_sch.c linux-2.6.32.41/drivers/ata/pata_sch.c
24493 --- linux-2.6.32.41/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24494 +++ linux-2.6.32.41/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24495 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24496 ATA_BMDMA_SHT(DRV_NAME),
24497 };
24498
24499 -static struct ata_port_operations sch_pata_ops = {
24500 +static const struct ata_port_operations sch_pata_ops = {
24501 .inherits = &ata_bmdma_port_ops,
24502 .cable_detect = ata_cable_unknown,
24503 .set_piomode = sch_set_piomode,
24504 diff -urNp linux-2.6.32.41/drivers/ata/pata_serverworks.c linux-2.6.32.41/drivers/ata/pata_serverworks.c
24505 --- linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24506 +++ linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24507 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24508 ATA_BMDMA_SHT(DRV_NAME),
24509 };
24510
24511 -static struct ata_port_operations serverworks_osb4_port_ops = {
24512 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24513 .inherits = &ata_bmdma_port_ops,
24514 .cable_detect = serverworks_cable_detect,
24515 .mode_filter = serverworks_osb4_filter,
24516 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24517 .set_dmamode = serverworks_set_dmamode,
24518 };
24519
24520 -static struct ata_port_operations serverworks_csb_port_ops = {
24521 +static const struct ata_port_operations serverworks_csb_port_ops = {
24522 .inherits = &serverworks_osb4_port_ops,
24523 .mode_filter = serverworks_csb_filter,
24524 };
24525 diff -urNp linux-2.6.32.41/drivers/ata/pata_sil680.c linux-2.6.32.41/drivers/ata/pata_sil680.c
24526 --- linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-03-27 14:31:47.000000000 -0400
24527 +++ linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-04-17 15:56:46.000000000 -0400
24528 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24529 ATA_BMDMA_SHT(DRV_NAME),
24530 };
24531
24532 -static struct ata_port_operations sil680_port_ops = {
24533 +static const struct ata_port_operations sil680_port_ops = {
24534 .inherits = &ata_bmdma32_port_ops,
24535 .cable_detect = sil680_cable_detect,
24536 .set_piomode = sil680_set_piomode,
24537 diff -urNp linux-2.6.32.41/drivers/ata/pata_sis.c linux-2.6.32.41/drivers/ata/pata_sis.c
24538 --- linux-2.6.32.41/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24539 +++ linux-2.6.32.41/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24540 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24541 ATA_BMDMA_SHT(DRV_NAME),
24542 };
24543
24544 -static struct ata_port_operations sis_133_for_sata_ops = {
24545 +static const struct ata_port_operations sis_133_for_sata_ops = {
24546 .inherits = &ata_bmdma_port_ops,
24547 .set_piomode = sis_133_set_piomode,
24548 .set_dmamode = sis_133_set_dmamode,
24549 .cable_detect = sis_133_cable_detect,
24550 };
24551
24552 -static struct ata_port_operations sis_base_ops = {
24553 +static const struct ata_port_operations sis_base_ops = {
24554 .inherits = &ata_bmdma_port_ops,
24555 .prereset = sis_pre_reset,
24556 };
24557
24558 -static struct ata_port_operations sis_133_ops = {
24559 +static const struct ata_port_operations sis_133_ops = {
24560 .inherits = &sis_base_ops,
24561 .set_piomode = sis_133_set_piomode,
24562 .set_dmamode = sis_133_set_dmamode,
24563 .cable_detect = sis_133_cable_detect,
24564 };
24565
24566 -static struct ata_port_operations sis_133_early_ops = {
24567 +static const struct ata_port_operations sis_133_early_ops = {
24568 .inherits = &sis_base_ops,
24569 .set_piomode = sis_100_set_piomode,
24570 .set_dmamode = sis_133_early_set_dmamode,
24571 .cable_detect = sis_66_cable_detect,
24572 };
24573
24574 -static struct ata_port_operations sis_100_ops = {
24575 +static const struct ata_port_operations sis_100_ops = {
24576 .inherits = &sis_base_ops,
24577 .set_piomode = sis_100_set_piomode,
24578 .set_dmamode = sis_100_set_dmamode,
24579 .cable_detect = sis_66_cable_detect,
24580 };
24581
24582 -static struct ata_port_operations sis_66_ops = {
24583 +static const struct ata_port_operations sis_66_ops = {
24584 .inherits = &sis_base_ops,
24585 .set_piomode = sis_old_set_piomode,
24586 .set_dmamode = sis_66_set_dmamode,
24587 .cable_detect = sis_66_cable_detect,
24588 };
24589
24590 -static struct ata_port_operations sis_old_ops = {
24591 +static const struct ata_port_operations sis_old_ops = {
24592 .inherits = &sis_base_ops,
24593 .set_piomode = sis_old_set_piomode,
24594 .set_dmamode = sis_old_set_dmamode,
24595 diff -urNp linux-2.6.32.41/drivers/ata/pata_sl82c105.c linux-2.6.32.41/drivers/ata/pata_sl82c105.c
24596 --- linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24597 +++ linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24598 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24599 ATA_BMDMA_SHT(DRV_NAME),
24600 };
24601
24602 -static struct ata_port_operations sl82c105_port_ops = {
24603 +static const struct ata_port_operations sl82c105_port_ops = {
24604 .inherits = &ata_bmdma_port_ops,
24605 .qc_defer = sl82c105_qc_defer,
24606 .bmdma_start = sl82c105_bmdma_start,
24607 diff -urNp linux-2.6.32.41/drivers/ata/pata_triflex.c linux-2.6.32.41/drivers/ata/pata_triflex.c
24608 --- linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24609 +++ linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24610 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24611 ATA_BMDMA_SHT(DRV_NAME),
24612 };
24613
24614 -static struct ata_port_operations triflex_port_ops = {
24615 +static const struct ata_port_operations triflex_port_ops = {
24616 .inherits = &ata_bmdma_port_ops,
24617 .bmdma_start = triflex_bmdma_start,
24618 .bmdma_stop = triflex_bmdma_stop,
24619 diff -urNp linux-2.6.32.41/drivers/ata/pata_via.c linux-2.6.32.41/drivers/ata/pata_via.c
24620 --- linux-2.6.32.41/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24621 +++ linux-2.6.32.41/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24622 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24623 ATA_BMDMA_SHT(DRV_NAME),
24624 };
24625
24626 -static struct ata_port_operations via_port_ops = {
24627 +static const struct ata_port_operations via_port_ops = {
24628 .inherits = &ata_bmdma_port_ops,
24629 .cable_detect = via_cable_detect,
24630 .set_piomode = via_set_piomode,
24631 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24632 .port_start = via_port_start,
24633 };
24634
24635 -static struct ata_port_operations via_port_ops_noirq = {
24636 +static const struct ata_port_operations via_port_ops_noirq = {
24637 .inherits = &via_port_ops,
24638 .sff_data_xfer = ata_sff_data_xfer_noirq,
24639 };
24640 diff -urNp linux-2.6.32.41/drivers/ata/pata_winbond.c linux-2.6.32.41/drivers/ata/pata_winbond.c
24641 --- linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24642 +++ linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24643 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24644 ATA_PIO_SHT(DRV_NAME),
24645 };
24646
24647 -static struct ata_port_operations winbond_port_ops = {
24648 +static const struct ata_port_operations winbond_port_ops = {
24649 .inherits = &ata_sff_port_ops,
24650 .sff_data_xfer = winbond_data_xfer,
24651 .cable_detect = ata_cable_40wire,
24652 diff -urNp linux-2.6.32.41/drivers/ata/pdc_adma.c linux-2.6.32.41/drivers/ata/pdc_adma.c
24653 --- linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24654 +++ linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24655 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24656 .dma_boundary = ADMA_DMA_BOUNDARY,
24657 };
24658
24659 -static struct ata_port_operations adma_ata_ops = {
24660 +static const struct ata_port_operations adma_ata_ops = {
24661 .inherits = &ata_sff_port_ops,
24662
24663 .lost_interrupt = ATA_OP_NULL,
24664 diff -urNp linux-2.6.32.41/drivers/ata/sata_fsl.c linux-2.6.32.41/drivers/ata/sata_fsl.c
24665 --- linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24666 +++ linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24667 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24668 .dma_boundary = ATA_DMA_BOUNDARY,
24669 };
24670
24671 -static struct ata_port_operations sata_fsl_ops = {
24672 +static const struct ata_port_operations sata_fsl_ops = {
24673 .inherits = &sata_pmp_port_ops,
24674
24675 .qc_defer = ata_std_qc_defer,
24676 diff -urNp linux-2.6.32.41/drivers/ata/sata_inic162x.c linux-2.6.32.41/drivers/ata/sata_inic162x.c
24677 --- linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24678 +++ linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24679 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24680 return 0;
24681 }
24682
24683 -static struct ata_port_operations inic_port_ops = {
24684 +static const struct ata_port_operations inic_port_ops = {
24685 .inherits = &sata_port_ops,
24686
24687 .check_atapi_dma = inic_check_atapi_dma,
24688 diff -urNp linux-2.6.32.41/drivers/ata/sata_mv.c linux-2.6.32.41/drivers/ata/sata_mv.c
24689 --- linux-2.6.32.41/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24690 +++ linux-2.6.32.41/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24691 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24692 .dma_boundary = MV_DMA_BOUNDARY,
24693 };
24694
24695 -static struct ata_port_operations mv5_ops = {
24696 +static const struct ata_port_operations mv5_ops = {
24697 .inherits = &ata_sff_port_ops,
24698
24699 .lost_interrupt = ATA_OP_NULL,
24700 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24701 .port_stop = mv_port_stop,
24702 };
24703
24704 -static struct ata_port_operations mv6_ops = {
24705 +static const struct ata_port_operations mv6_ops = {
24706 .inherits = &mv5_ops,
24707 .dev_config = mv6_dev_config,
24708 .scr_read = mv_scr_read,
24709 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24710 .bmdma_status = mv_bmdma_status,
24711 };
24712
24713 -static struct ata_port_operations mv_iie_ops = {
24714 +static const struct ata_port_operations mv_iie_ops = {
24715 .inherits = &mv6_ops,
24716 .dev_config = ATA_OP_NULL,
24717 .qc_prep = mv_qc_prep_iie,
24718 diff -urNp linux-2.6.32.41/drivers/ata/sata_nv.c linux-2.6.32.41/drivers/ata/sata_nv.c
24719 --- linux-2.6.32.41/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24720 +++ linux-2.6.32.41/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24721 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24722 * cases. Define nv_hardreset() which only kicks in for post-boot
24723 * probing and use it for all variants.
24724 */
24725 -static struct ata_port_operations nv_generic_ops = {
24726 +static const struct ata_port_operations nv_generic_ops = {
24727 .inherits = &ata_bmdma_port_ops,
24728 .lost_interrupt = ATA_OP_NULL,
24729 .scr_read = nv_scr_read,
24730 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24731 .hardreset = nv_hardreset,
24732 };
24733
24734 -static struct ata_port_operations nv_nf2_ops = {
24735 +static const struct ata_port_operations nv_nf2_ops = {
24736 .inherits = &nv_generic_ops,
24737 .freeze = nv_nf2_freeze,
24738 .thaw = nv_nf2_thaw,
24739 };
24740
24741 -static struct ata_port_operations nv_ck804_ops = {
24742 +static const struct ata_port_operations nv_ck804_ops = {
24743 .inherits = &nv_generic_ops,
24744 .freeze = nv_ck804_freeze,
24745 .thaw = nv_ck804_thaw,
24746 .host_stop = nv_ck804_host_stop,
24747 };
24748
24749 -static struct ata_port_operations nv_adma_ops = {
24750 +static const struct ata_port_operations nv_adma_ops = {
24751 .inherits = &nv_ck804_ops,
24752
24753 .check_atapi_dma = nv_adma_check_atapi_dma,
24754 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24755 .host_stop = nv_adma_host_stop,
24756 };
24757
24758 -static struct ata_port_operations nv_swncq_ops = {
24759 +static const struct ata_port_operations nv_swncq_ops = {
24760 .inherits = &nv_generic_ops,
24761
24762 .qc_defer = ata_std_qc_defer,
24763 diff -urNp linux-2.6.32.41/drivers/ata/sata_promise.c linux-2.6.32.41/drivers/ata/sata_promise.c
24764 --- linux-2.6.32.41/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24765 +++ linux-2.6.32.41/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24766 @@ -195,7 +195,7 @@ static const struct ata_port_operations
24767 .error_handler = pdc_error_handler,
24768 };
24769
24770 -static struct ata_port_operations pdc_sata_ops = {
24771 +static const struct ata_port_operations pdc_sata_ops = {
24772 .inherits = &pdc_common_ops,
24773 .cable_detect = pdc_sata_cable_detect,
24774 .freeze = pdc_sata_freeze,
24775 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24776
24777 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24778 and ->freeze/thaw that ignore the hotplug controls. */
24779 -static struct ata_port_operations pdc_old_sata_ops = {
24780 +static const struct ata_port_operations pdc_old_sata_ops = {
24781 .inherits = &pdc_sata_ops,
24782 .freeze = pdc_freeze,
24783 .thaw = pdc_thaw,
24784 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24785 };
24786
24787 -static struct ata_port_operations pdc_pata_ops = {
24788 +static const struct ata_port_operations pdc_pata_ops = {
24789 .inherits = &pdc_common_ops,
24790 .cable_detect = pdc_pata_cable_detect,
24791 .freeze = pdc_freeze,
24792 diff -urNp linux-2.6.32.41/drivers/ata/sata_qstor.c linux-2.6.32.41/drivers/ata/sata_qstor.c
24793 --- linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24794 +++ linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24795 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24796 .dma_boundary = QS_DMA_BOUNDARY,
24797 };
24798
24799 -static struct ata_port_operations qs_ata_ops = {
24800 +static const struct ata_port_operations qs_ata_ops = {
24801 .inherits = &ata_sff_port_ops,
24802
24803 .check_atapi_dma = qs_check_atapi_dma,
24804 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil24.c linux-2.6.32.41/drivers/ata/sata_sil24.c
24805 --- linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24806 +++ linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24807 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24808 .dma_boundary = ATA_DMA_BOUNDARY,
24809 };
24810
24811 -static struct ata_port_operations sil24_ops = {
24812 +static const struct ata_port_operations sil24_ops = {
24813 .inherits = &sata_pmp_port_ops,
24814
24815 .qc_defer = sil24_qc_defer,
24816 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil.c linux-2.6.32.41/drivers/ata/sata_sil.c
24817 --- linux-2.6.32.41/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24818 +++ linux-2.6.32.41/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24819 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24820 .sg_tablesize = ATA_MAX_PRD
24821 };
24822
24823 -static struct ata_port_operations sil_ops = {
24824 +static const struct ata_port_operations sil_ops = {
24825 .inherits = &ata_bmdma32_port_ops,
24826 .dev_config = sil_dev_config,
24827 .set_mode = sil_set_mode,
24828 diff -urNp linux-2.6.32.41/drivers/ata/sata_sis.c linux-2.6.32.41/drivers/ata/sata_sis.c
24829 --- linux-2.6.32.41/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24830 +++ linux-2.6.32.41/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24831 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24832 ATA_BMDMA_SHT(DRV_NAME),
24833 };
24834
24835 -static struct ata_port_operations sis_ops = {
24836 +static const struct ata_port_operations sis_ops = {
24837 .inherits = &ata_bmdma_port_ops,
24838 .scr_read = sis_scr_read,
24839 .scr_write = sis_scr_write,
24840 diff -urNp linux-2.6.32.41/drivers/ata/sata_svw.c linux-2.6.32.41/drivers/ata/sata_svw.c
24841 --- linux-2.6.32.41/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24842 +++ linux-2.6.32.41/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24843 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24844 };
24845
24846
24847 -static struct ata_port_operations k2_sata_ops = {
24848 +static const struct ata_port_operations k2_sata_ops = {
24849 .inherits = &ata_bmdma_port_ops,
24850 .sff_tf_load = k2_sata_tf_load,
24851 .sff_tf_read = k2_sata_tf_read,
24852 diff -urNp linux-2.6.32.41/drivers/ata/sata_sx4.c linux-2.6.32.41/drivers/ata/sata_sx4.c
24853 --- linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24854 +++ linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24855 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24856 };
24857
24858 /* TODO: inherit from base port_ops after converting to new EH */
24859 -static struct ata_port_operations pdc_20621_ops = {
24860 +static const struct ata_port_operations pdc_20621_ops = {
24861 .inherits = &ata_sff_port_ops,
24862
24863 .check_atapi_dma = pdc_check_atapi_dma,
24864 diff -urNp linux-2.6.32.41/drivers/ata/sata_uli.c linux-2.6.32.41/drivers/ata/sata_uli.c
24865 --- linux-2.6.32.41/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24866 +++ linux-2.6.32.41/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24867 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24868 ATA_BMDMA_SHT(DRV_NAME),
24869 };
24870
24871 -static struct ata_port_operations uli_ops = {
24872 +static const struct ata_port_operations uli_ops = {
24873 .inherits = &ata_bmdma_port_ops,
24874 .scr_read = uli_scr_read,
24875 .scr_write = uli_scr_write,
24876 diff -urNp linux-2.6.32.41/drivers/ata/sata_via.c linux-2.6.32.41/drivers/ata/sata_via.c
24877 --- linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24878 +++ linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24879 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24880 ATA_BMDMA_SHT(DRV_NAME),
24881 };
24882
24883 -static struct ata_port_operations svia_base_ops = {
24884 +static const struct ata_port_operations svia_base_ops = {
24885 .inherits = &ata_bmdma_port_ops,
24886 .sff_tf_load = svia_tf_load,
24887 };
24888
24889 -static struct ata_port_operations vt6420_sata_ops = {
24890 +static const struct ata_port_operations vt6420_sata_ops = {
24891 .inherits = &svia_base_ops,
24892 .freeze = svia_noop_freeze,
24893 .prereset = vt6420_prereset,
24894 .bmdma_start = vt6420_bmdma_start,
24895 };
24896
24897 -static struct ata_port_operations vt6421_pata_ops = {
24898 +static const struct ata_port_operations vt6421_pata_ops = {
24899 .inherits = &svia_base_ops,
24900 .cable_detect = vt6421_pata_cable_detect,
24901 .set_piomode = vt6421_set_pio_mode,
24902 .set_dmamode = vt6421_set_dma_mode,
24903 };
24904
24905 -static struct ata_port_operations vt6421_sata_ops = {
24906 +static const struct ata_port_operations vt6421_sata_ops = {
24907 .inherits = &svia_base_ops,
24908 .scr_read = svia_scr_read,
24909 .scr_write = svia_scr_write,
24910 };
24911
24912 -static struct ata_port_operations vt8251_ops = {
24913 +static const struct ata_port_operations vt8251_ops = {
24914 .inherits = &svia_base_ops,
24915 .hardreset = sata_std_hardreset,
24916 .scr_read = vt8251_scr_read,
24917 diff -urNp linux-2.6.32.41/drivers/ata/sata_vsc.c linux-2.6.32.41/drivers/ata/sata_vsc.c
24918 --- linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
24919 +++ linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
24920 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
24921 };
24922
24923
24924 -static struct ata_port_operations vsc_sata_ops = {
24925 +static const struct ata_port_operations vsc_sata_ops = {
24926 .inherits = &ata_bmdma_port_ops,
24927 /* The IRQ handling is not quite standard SFF behaviour so we
24928 cannot use the default lost interrupt handler */
24929 diff -urNp linux-2.6.32.41/drivers/atm/adummy.c linux-2.6.32.41/drivers/atm/adummy.c
24930 --- linux-2.6.32.41/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
24931 +++ linux-2.6.32.41/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
24932 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
24933 vcc->pop(vcc, skb);
24934 else
24935 dev_kfree_skb_any(skb);
24936 - atomic_inc(&vcc->stats->tx);
24937 + atomic_inc_unchecked(&vcc->stats->tx);
24938
24939 return 0;
24940 }
24941 diff -urNp linux-2.6.32.41/drivers/atm/ambassador.c linux-2.6.32.41/drivers/atm/ambassador.c
24942 --- linux-2.6.32.41/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
24943 +++ linux-2.6.32.41/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
24944 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
24945 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
24946
24947 // VC layer stats
24948 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24949 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24950
24951 // free the descriptor
24952 kfree (tx_descr);
24953 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
24954 dump_skb ("<<<", vc, skb);
24955
24956 // VC layer stats
24957 - atomic_inc(&atm_vcc->stats->rx);
24958 + atomic_inc_unchecked(&atm_vcc->stats->rx);
24959 __net_timestamp(skb);
24960 // end of our responsability
24961 atm_vcc->push (atm_vcc, skb);
24962 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
24963 } else {
24964 PRINTK (KERN_INFO, "dropped over-size frame");
24965 // should we count this?
24966 - atomic_inc(&atm_vcc->stats->rx_drop);
24967 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24968 }
24969
24970 } else {
24971 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
24972 }
24973
24974 if (check_area (skb->data, skb->len)) {
24975 - atomic_inc(&atm_vcc->stats->tx_err);
24976 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
24977 return -ENOMEM; // ?
24978 }
24979
24980 diff -urNp linux-2.6.32.41/drivers/atm/atmtcp.c linux-2.6.32.41/drivers/atm/atmtcp.c
24981 --- linux-2.6.32.41/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
24982 +++ linux-2.6.32.41/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
24983 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
24984 if (vcc->pop) vcc->pop(vcc,skb);
24985 else dev_kfree_skb(skb);
24986 if (dev_data) return 0;
24987 - atomic_inc(&vcc->stats->tx_err);
24988 + atomic_inc_unchecked(&vcc->stats->tx_err);
24989 return -ENOLINK;
24990 }
24991 size = skb->len+sizeof(struct atmtcp_hdr);
24992 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
24993 if (!new_skb) {
24994 if (vcc->pop) vcc->pop(vcc,skb);
24995 else dev_kfree_skb(skb);
24996 - atomic_inc(&vcc->stats->tx_err);
24997 + atomic_inc_unchecked(&vcc->stats->tx_err);
24998 return -ENOBUFS;
24999 }
25000 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25001 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25002 if (vcc->pop) vcc->pop(vcc,skb);
25003 else dev_kfree_skb(skb);
25004 out_vcc->push(out_vcc,new_skb);
25005 - atomic_inc(&vcc->stats->tx);
25006 - atomic_inc(&out_vcc->stats->rx);
25007 + atomic_inc_unchecked(&vcc->stats->tx);
25008 + atomic_inc_unchecked(&out_vcc->stats->rx);
25009 return 0;
25010 }
25011
25012 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25013 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25014 read_unlock(&vcc_sklist_lock);
25015 if (!out_vcc) {
25016 - atomic_inc(&vcc->stats->tx_err);
25017 + atomic_inc_unchecked(&vcc->stats->tx_err);
25018 goto done;
25019 }
25020 skb_pull(skb,sizeof(struct atmtcp_hdr));
25021 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25022 __net_timestamp(new_skb);
25023 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25024 out_vcc->push(out_vcc,new_skb);
25025 - atomic_inc(&vcc->stats->tx);
25026 - atomic_inc(&out_vcc->stats->rx);
25027 + atomic_inc_unchecked(&vcc->stats->tx);
25028 + atomic_inc_unchecked(&out_vcc->stats->rx);
25029 done:
25030 if (vcc->pop) vcc->pop(vcc,skb);
25031 else dev_kfree_skb(skb);
25032 diff -urNp linux-2.6.32.41/drivers/atm/eni.c linux-2.6.32.41/drivers/atm/eni.c
25033 --- linux-2.6.32.41/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25034 +++ linux-2.6.32.41/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25035 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25036 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25037 vcc->dev->number);
25038 length = 0;
25039 - atomic_inc(&vcc->stats->rx_err);
25040 + atomic_inc_unchecked(&vcc->stats->rx_err);
25041 }
25042 else {
25043 length = ATM_CELL_SIZE-1; /* no HEC */
25044 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25045 size);
25046 }
25047 eff = length = 0;
25048 - atomic_inc(&vcc->stats->rx_err);
25049 + atomic_inc_unchecked(&vcc->stats->rx_err);
25050 }
25051 else {
25052 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25053 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25054 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25055 vcc->dev->number,vcc->vci,length,size << 2,descr);
25056 length = eff = 0;
25057 - atomic_inc(&vcc->stats->rx_err);
25058 + atomic_inc_unchecked(&vcc->stats->rx_err);
25059 }
25060 }
25061 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25062 @@ -770,7 +770,7 @@ rx_dequeued++;
25063 vcc->push(vcc,skb);
25064 pushed++;
25065 }
25066 - atomic_inc(&vcc->stats->rx);
25067 + atomic_inc_unchecked(&vcc->stats->rx);
25068 }
25069 wake_up(&eni_dev->rx_wait);
25070 }
25071 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25072 PCI_DMA_TODEVICE);
25073 if (vcc->pop) vcc->pop(vcc,skb);
25074 else dev_kfree_skb_irq(skb);
25075 - atomic_inc(&vcc->stats->tx);
25076 + atomic_inc_unchecked(&vcc->stats->tx);
25077 wake_up(&eni_dev->tx_wait);
25078 dma_complete++;
25079 }
25080 diff -urNp linux-2.6.32.41/drivers/atm/firestream.c linux-2.6.32.41/drivers/atm/firestream.c
25081 --- linux-2.6.32.41/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25082 +++ linux-2.6.32.41/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25083 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25084 }
25085 }
25086
25087 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25088 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25089
25090 fs_dprintk (FS_DEBUG_TXMEM, "i");
25091 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25092 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25093 #endif
25094 skb_put (skb, qe->p1 & 0xffff);
25095 ATM_SKB(skb)->vcc = atm_vcc;
25096 - atomic_inc(&atm_vcc->stats->rx);
25097 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25098 __net_timestamp(skb);
25099 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25100 atm_vcc->push (atm_vcc, skb);
25101 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25102 kfree (pe);
25103 }
25104 if (atm_vcc)
25105 - atomic_inc(&atm_vcc->stats->rx_drop);
25106 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25107 break;
25108 case 0x1f: /* Reassembly abort: no buffers. */
25109 /* Silently increment error counter. */
25110 if (atm_vcc)
25111 - atomic_inc(&atm_vcc->stats->rx_drop);
25112 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25113 break;
25114 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25115 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25116 diff -urNp linux-2.6.32.41/drivers/atm/fore200e.c linux-2.6.32.41/drivers/atm/fore200e.c
25117 --- linux-2.6.32.41/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25118 +++ linux-2.6.32.41/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25119 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25120 #endif
25121 /* check error condition */
25122 if (*entry->status & STATUS_ERROR)
25123 - atomic_inc(&vcc->stats->tx_err);
25124 + atomic_inc_unchecked(&vcc->stats->tx_err);
25125 else
25126 - atomic_inc(&vcc->stats->tx);
25127 + atomic_inc_unchecked(&vcc->stats->tx);
25128 }
25129 }
25130
25131 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25132 if (skb == NULL) {
25133 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25134
25135 - atomic_inc(&vcc->stats->rx_drop);
25136 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25137 return -ENOMEM;
25138 }
25139
25140 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25141
25142 dev_kfree_skb_any(skb);
25143
25144 - atomic_inc(&vcc->stats->rx_drop);
25145 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25146 return -ENOMEM;
25147 }
25148
25149 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25150
25151 vcc->push(vcc, skb);
25152 - atomic_inc(&vcc->stats->rx);
25153 + atomic_inc_unchecked(&vcc->stats->rx);
25154
25155 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25156
25157 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25158 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25159 fore200e->atm_dev->number,
25160 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25161 - atomic_inc(&vcc->stats->rx_err);
25162 + atomic_inc_unchecked(&vcc->stats->rx_err);
25163 }
25164 }
25165
25166 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25167 goto retry_here;
25168 }
25169
25170 - atomic_inc(&vcc->stats->tx_err);
25171 + atomic_inc_unchecked(&vcc->stats->tx_err);
25172
25173 fore200e->tx_sat++;
25174 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25175 diff -urNp linux-2.6.32.41/drivers/atm/he.c linux-2.6.32.41/drivers/atm/he.c
25176 --- linux-2.6.32.41/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25177 +++ linux-2.6.32.41/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25178 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25179
25180 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25181 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25182 - atomic_inc(&vcc->stats->rx_drop);
25183 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25184 goto return_host_buffers;
25185 }
25186
25187 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25188 RBRQ_LEN_ERR(he_dev->rbrq_head)
25189 ? "LEN_ERR" : "",
25190 vcc->vpi, vcc->vci);
25191 - atomic_inc(&vcc->stats->rx_err);
25192 + atomic_inc_unchecked(&vcc->stats->rx_err);
25193 goto return_host_buffers;
25194 }
25195
25196 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25197 vcc->push(vcc, skb);
25198 spin_lock(&he_dev->global_lock);
25199
25200 - atomic_inc(&vcc->stats->rx);
25201 + atomic_inc_unchecked(&vcc->stats->rx);
25202
25203 return_host_buffers:
25204 ++pdus_assembled;
25205 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25206 tpd->vcc->pop(tpd->vcc, tpd->skb);
25207 else
25208 dev_kfree_skb_any(tpd->skb);
25209 - atomic_inc(&tpd->vcc->stats->tx_err);
25210 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25211 }
25212 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25213 return;
25214 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25215 vcc->pop(vcc, skb);
25216 else
25217 dev_kfree_skb_any(skb);
25218 - atomic_inc(&vcc->stats->tx_err);
25219 + atomic_inc_unchecked(&vcc->stats->tx_err);
25220 return -EINVAL;
25221 }
25222
25223 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25224 vcc->pop(vcc, skb);
25225 else
25226 dev_kfree_skb_any(skb);
25227 - atomic_inc(&vcc->stats->tx_err);
25228 + atomic_inc_unchecked(&vcc->stats->tx_err);
25229 return -EINVAL;
25230 }
25231 #endif
25232 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25233 vcc->pop(vcc, skb);
25234 else
25235 dev_kfree_skb_any(skb);
25236 - atomic_inc(&vcc->stats->tx_err);
25237 + atomic_inc_unchecked(&vcc->stats->tx_err);
25238 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25239 return -ENOMEM;
25240 }
25241 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25242 vcc->pop(vcc, skb);
25243 else
25244 dev_kfree_skb_any(skb);
25245 - atomic_inc(&vcc->stats->tx_err);
25246 + atomic_inc_unchecked(&vcc->stats->tx_err);
25247 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25248 return -ENOMEM;
25249 }
25250 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25251 __enqueue_tpd(he_dev, tpd, cid);
25252 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25253
25254 - atomic_inc(&vcc->stats->tx);
25255 + atomic_inc_unchecked(&vcc->stats->tx);
25256
25257 return 0;
25258 }
25259 diff -urNp linux-2.6.32.41/drivers/atm/horizon.c linux-2.6.32.41/drivers/atm/horizon.c
25260 --- linux-2.6.32.41/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25261 +++ linux-2.6.32.41/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25262 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25263 {
25264 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25265 // VC layer stats
25266 - atomic_inc(&vcc->stats->rx);
25267 + atomic_inc_unchecked(&vcc->stats->rx);
25268 __net_timestamp(skb);
25269 // end of our responsability
25270 vcc->push (vcc, skb);
25271 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25272 dev->tx_iovec = NULL;
25273
25274 // VC layer stats
25275 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25276 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25277
25278 // free the skb
25279 hrz_kfree_skb (skb);
25280 diff -urNp linux-2.6.32.41/drivers/atm/idt77252.c linux-2.6.32.41/drivers/atm/idt77252.c
25281 --- linux-2.6.32.41/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25282 +++ linux-2.6.32.41/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25283 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25284 else
25285 dev_kfree_skb(skb);
25286
25287 - atomic_inc(&vcc->stats->tx);
25288 + atomic_inc_unchecked(&vcc->stats->tx);
25289 }
25290
25291 atomic_dec(&scq->used);
25292 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25293 if ((sb = dev_alloc_skb(64)) == NULL) {
25294 printk("%s: Can't allocate buffers for aal0.\n",
25295 card->name);
25296 - atomic_add(i, &vcc->stats->rx_drop);
25297 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25298 break;
25299 }
25300 if (!atm_charge(vcc, sb->truesize)) {
25301 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25302 card->name);
25303 - atomic_add(i - 1, &vcc->stats->rx_drop);
25304 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25305 dev_kfree_skb(sb);
25306 break;
25307 }
25308 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25309 ATM_SKB(sb)->vcc = vcc;
25310 __net_timestamp(sb);
25311 vcc->push(vcc, sb);
25312 - atomic_inc(&vcc->stats->rx);
25313 + atomic_inc_unchecked(&vcc->stats->rx);
25314
25315 cell += ATM_CELL_PAYLOAD;
25316 }
25317 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25318 "(CDC: %08x)\n",
25319 card->name, len, rpp->len, readl(SAR_REG_CDC));
25320 recycle_rx_pool_skb(card, rpp);
25321 - atomic_inc(&vcc->stats->rx_err);
25322 + atomic_inc_unchecked(&vcc->stats->rx_err);
25323 return;
25324 }
25325 if (stat & SAR_RSQE_CRC) {
25326 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25327 recycle_rx_pool_skb(card, rpp);
25328 - atomic_inc(&vcc->stats->rx_err);
25329 + atomic_inc_unchecked(&vcc->stats->rx_err);
25330 return;
25331 }
25332 if (skb_queue_len(&rpp->queue) > 1) {
25333 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25334 RXPRINTK("%s: Can't alloc RX skb.\n",
25335 card->name);
25336 recycle_rx_pool_skb(card, rpp);
25337 - atomic_inc(&vcc->stats->rx_err);
25338 + atomic_inc_unchecked(&vcc->stats->rx_err);
25339 return;
25340 }
25341 if (!atm_charge(vcc, skb->truesize)) {
25342 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25343 __net_timestamp(skb);
25344
25345 vcc->push(vcc, skb);
25346 - atomic_inc(&vcc->stats->rx);
25347 + atomic_inc_unchecked(&vcc->stats->rx);
25348
25349 return;
25350 }
25351 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25352 __net_timestamp(skb);
25353
25354 vcc->push(vcc, skb);
25355 - atomic_inc(&vcc->stats->rx);
25356 + atomic_inc_unchecked(&vcc->stats->rx);
25357
25358 if (skb->truesize > SAR_FB_SIZE_3)
25359 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25360 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25361 if (vcc->qos.aal != ATM_AAL0) {
25362 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25363 card->name, vpi, vci);
25364 - atomic_inc(&vcc->stats->rx_drop);
25365 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25366 goto drop;
25367 }
25368
25369 if ((sb = dev_alloc_skb(64)) == NULL) {
25370 printk("%s: Can't allocate buffers for AAL0.\n",
25371 card->name);
25372 - atomic_inc(&vcc->stats->rx_err);
25373 + atomic_inc_unchecked(&vcc->stats->rx_err);
25374 goto drop;
25375 }
25376
25377 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25378 ATM_SKB(sb)->vcc = vcc;
25379 __net_timestamp(sb);
25380 vcc->push(vcc, sb);
25381 - atomic_inc(&vcc->stats->rx);
25382 + atomic_inc_unchecked(&vcc->stats->rx);
25383
25384 drop:
25385 skb_pull(queue, 64);
25386 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25387
25388 if (vc == NULL) {
25389 printk("%s: NULL connection in send().\n", card->name);
25390 - atomic_inc(&vcc->stats->tx_err);
25391 + atomic_inc_unchecked(&vcc->stats->tx_err);
25392 dev_kfree_skb(skb);
25393 return -EINVAL;
25394 }
25395 if (!test_bit(VCF_TX, &vc->flags)) {
25396 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25397 - atomic_inc(&vcc->stats->tx_err);
25398 + atomic_inc_unchecked(&vcc->stats->tx_err);
25399 dev_kfree_skb(skb);
25400 return -EINVAL;
25401 }
25402 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25403 break;
25404 default:
25405 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25406 - atomic_inc(&vcc->stats->tx_err);
25407 + atomic_inc_unchecked(&vcc->stats->tx_err);
25408 dev_kfree_skb(skb);
25409 return -EINVAL;
25410 }
25411
25412 if (skb_shinfo(skb)->nr_frags != 0) {
25413 printk("%s: No scatter-gather yet.\n", card->name);
25414 - atomic_inc(&vcc->stats->tx_err);
25415 + atomic_inc_unchecked(&vcc->stats->tx_err);
25416 dev_kfree_skb(skb);
25417 return -EINVAL;
25418 }
25419 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25420
25421 err = queue_skb(card, vc, skb, oam);
25422 if (err) {
25423 - atomic_inc(&vcc->stats->tx_err);
25424 + atomic_inc_unchecked(&vcc->stats->tx_err);
25425 dev_kfree_skb(skb);
25426 return err;
25427 }
25428 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25429 skb = dev_alloc_skb(64);
25430 if (!skb) {
25431 printk("%s: Out of memory in send_oam().\n", card->name);
25432 - atomic_inc(&vcc->stats->tx_err);
25433 + atomic_inc_unchecked(&vcc->stats->tx_err);
25434 return -ENOMEM;
25435 }
25436 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25437 diff -urNp linux-2.6.32.41/drivers/atm/iphase.c linux-2.6.32.41/drivers/atm/iphase.c
25438 --- linux-2.6.32.41/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25439 +++ linux-2.6.32.41/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25440 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25441 status = (u_short) (buf_desc_ptr->desc_mode);
25442 if (status & (RX_CER | RX_PTE | RX_OFL))
25443 {
25444 - atomic_inc(&vcc->stats->rx_err);
25445 + atomic_inc_unchecked(&vcc->stats->rx_err);
25446 IF_ERR(printk("IA: bad packet, dropping it");)
25447 if (status & RX_CER) {
25448 IF_ERR(printk(" cause: packet CRC error\n");)
25449 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25450 len = dma_addr - buf_addr;
25451 if (len > iadev->rx_buf_sz) {
25452 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25453 - atomic_inc(&vcc->stats->rx_err);
25454 + atomic_inc_unchecked(&vcc->stats->rx_err);
25455 goto out_free_desc;
25456 }
25457
25458 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25459 ia_vcc = INPH_IA_VCC(vcc);
25460 if (ia_vcc == NULL)
25461 {
25462 - atomic_inc(&vcc->stats->rx_err);
25463 + atomic_inc_unchecked(&vcc->stats->rx_err);
25464 dev_kfree_skb_any(skb);
25465 atm_return(vcc, atm_guess_pdu2truesize(len));
25466 goto INCR_DLE;
25467 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25468 if ((length > iadev->rx_buf_sz) || (length >
25469 (skb->len - sizeof(struct cpcs_trailer))))
25470 {
25471 - atomic_inc(&vcc->stats->rx_err);
25472 + atomic_inc_unchecked(&vcc->stats->rx_err);
25473 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25474 length, skb->len);)
25475 dev_kfree_skb_any(skb);
25476 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25477
25478 IF_RX(printk("rx_dle_intr: skb push");)
25479 vcc->push(vcc,skb);
25480 - atomic_inc(&vcc->stats->rx);
25481 + atomic_inc_unchecked(&vcc->stats->rx);
25482 iadev->rx_pkt_cnt++;
25483 }
25484 INCR_DLE:
25485 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25486 {
25487 struct k_sonet_stats *stats;
25488 stats = &PRIV(_ia_dev[board])->sonet_stats;
25489 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25490 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25491 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25492 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25493 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25494 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25495 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25496 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25497 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25498 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25499 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25500 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25501 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25502 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25503 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25504 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25505 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25506 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25507 }
25508 ia_cmds.status = 0;
25509 break;
25510 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25511 if ((desc == 0) || (desc > iadev->num_tx_desc))
25512 {
25513 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25514 - atomic_inc(&vcc->stats->tx);
25515 + atomic_inc_unchecked(&vcc->stats->tx);
25516 if (vcc->pop)
25517 vcc->pop(vcc, skb);
25518 else
25519 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25520 ATM_DESC(skb) = vcc->vci;
25521 skb_queue_tail(&iadev->tx_dma_q, skb);
25522
25523 - atomic_inc(&vcc->stats->tx);
25524 + atomic_inc_unchecked(&vcc->stats->tx);
25525 iadev->tx_pkt_cnt++;
25526 /* Increment transaction counter */
25527 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25528
25529 #if 0
25530 /* add flow control logic */
25531 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25532 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25533 if (iavcc->vc_desc_cnt > 10) {
25534 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25535 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25536 diff -urNp linux-2.6.32.41/drivers/atm/lanai.c linux-2.6.32.41/drivers/atm/lanai.c
25537 --- linux-2.6.32.41/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25538 +++ linux-2.6.32.41/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25539 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25540 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25541 lanai_endtx(lanai, lvcc);
25542 lanai_free_skb(lvcc->tx.atmvcc, skb);
25543 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25544 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25545 }
25546
25547 /* Try to fill the buffer - don't call unless there is backlog */
25548 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25549 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25550 __net_timestamp(skb);
25551 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25552 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25553 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25554 out:
25555 lvcc->rx.buf.ptr = end;
25556 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25557 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25558 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25559 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25560 lanai->stats.service_rxnotaal5++;
25561 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25562 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25563 return 0;
25564 }
25565 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25566 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25567 int bytes;
25568 read_unlock(&vcc_sklist_lock);
25569 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25570 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25571 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25572 lvcc->stats.x.aal5.service_trash++;
25573 bytes = (SERVICE_GET_END(s) * 16) -
25574 (((unsigned long) lvcc->rx.buf.ptr) -
25575 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25576 }
25577 if (s & SERVICE_STREAM) {
25578 read_unlock(&vcc_sklist_lock);
25579 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25580 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25581 lvcc->stats.x.aal5.service_stream++;
25582 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25583 "PDU on VCI %d!\n", lanai->number, vci);
25584 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25585 return 0;
25586 }
25587 DPRINTK("got rx crc error on vci %d\n", vci);
25588 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25589 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25590 lvcc->stats.x.aal5.service_rxcrc++;
25591 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25592 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25593 diff -urNp linux-2.6.32.41/drivers/atm/nicstar.c linux-2.6.32.41/drivers/atm/nicstar.c
25594 --- linux-2.6.32.41/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25595 +++ linux-2.6.32.41/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25596 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25597 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25598 {
25599 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25600 - atomic_inc(&vcc->stats->tx_err);
25601 + atomic_inc_unchecked(&vcc->stats->tx_err);
25602 dev_kfree_skb_any(skb);
25603 return -EINVAL;
25604 }
25605 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25606 if (!vc->tx)
25607 {
25608 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25609 - atomic_inc(&vcc->stats->tx_err);
25610 + atomic_inc_unchecked(&vcc->stats->tx_err);
25611 dev_kfree_skb_any(skb);
25612 return -EINVAL;
25613 }
25614 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25615 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25616 {
25617 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25618 - atomic_inc(&vcc->stats->tx_err);
25619 + atomic_inc_unchecked(&vcc->stats->tx_err);
25620 dev_kfree_skb_any(skb);
25621 return -EINVAL;
25622 }
25623 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25624 if (skb_shinfo(skb)->nr_frags != 0)
25625 {
25626 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25627 - atomic_inc(&vcc->stats->tx_err);
25628 + atomic_inc_unchecked(&vcc->stats->tx_err);
25629 dev_kfree_skb_any(skb);
25630 return -EINVAL;
25631 }
25632 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25633
25634 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25635 {
25636 - atomic_inc(&vcc->stats->tx_err);
25637 + atomic_inc_unchecked(&vcc->stats->tx_err);
25638 dev_kfree_skb_any(skb);
25639 return -EIO;
25640 }
25641 - atomic_inc(&vcc->stats->tx);
25642 + atomic_inc_unchecked(&vcc->stats->tx);
25643
25644 return 0;
25645 }
25646 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25647 {
25648 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25649 card->index);
25650 - atomic_add(i,&vcc->stats->rx_drop);
25651 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25652 break;
25653 }
25654 if (!atm_charge(vcc, sb->truesize))
25655 {
25656 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25657 card->index);
25658 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25659 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25660 dev_kfree_skb_any(sb);
25661 break;
25662 }
25663 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25664 ATM_SKB(sb)->vcc = vcc;
25665 __net_timestamp(sb);
25666 vcc->push(vcc, sb);
25667 - atomic_inc(&vcc->stats->rx);
25668 + atomic_inc_unchecked(&vcc->stats->rx);
25669 cell += ATM_CELL_PAYLOAD;
25670 }
25671
25672 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25673 if (iovb == NULL)
25674 {
25675 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25676 - atomic_inc(&vcc->stats->rx_drop);
25677 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25678 recycle_rx_buf(card, skb);
25679 return;
25680 }
25681 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25682 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25683 {
25684 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25685 - atomic_inc(&vcc->stats->rx_err);
25686 + atomic_inc_unchecked(&vcc->stats->rx_err);
25687 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25688 NS_SKB(iovb)->iovcnt = 0;
25689 iovb->len = 0;
25690 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25691 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25692 card->index);
25693 which_list(card, skb);
25694 - atomic_inc(&vcc->stats->rx_err);
25695 + atomic_inc_unchecked(&vcc->stats->rx_err);
25696 recycle_rx_buf(card, skb);
25697 vc->rx_iov = NULL;
25698 recycle_iov_buf(card, iovb);
25699 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25700 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25701 card->index);
25702 which_list(card, skb);
25703 - atomic_inc(&vcc->stats->rx_err);
25704 + atomic_inc_unchecked(&vcc->stats->rx_err);
25705 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25706 NS_SKB(iovb)->iovcnt);
25707 vc->rx_iov = NULL;
25708 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25709 printk(" - PDU size mismatch.\n");
25710 else
25711 printk(".\n");
25712 - atomic_inc(&vcc->stats->rx_err);
25713 + atomic_inc_unchecked(&vcc->stats->rx_err);
25714 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25715 NS_SKB(iovb)->iovcnt);
25716 vc->rx_iov = NULL;
25717 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25718 if (!atm_charge(vcc, skb->truesize))
25719 {
25720 push_rxbufs(card, skb);
25721 - atomic_inc(&vcc->stats->rx_drop);
25722 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25723 }
25724 else
25725 {
25726 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25727 ATM_SKB(skb)->vcc = vcc;
25728 __net_timestamp(skb);
25729 vcc->push(vcc, skb);
25730 - atomic_inc(&vcc->stats->rx);
25731 + atomic_inc_unchecked(&vcc->stats->rx);
25732 }
25733 }
25734 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25735 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25736 if (!atm_charge(vcc, sb->truesize))
25737 {
25738 push_rxbufs(card, sb);
25739 - atomic_inc(&vcc->stats->rx_drop);
25740 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25741 }
25742 else
25743 {
25744 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25745 ATM_SKB(sb)->vcc = vcc;
25746 __net_timestamp(sb);
25747 vcc->push(vcc, sb);
25748 - atomic_inc(&vcc->stats->rx);
25749 + atomic_inc_unchecked(&vcc->stats->rx);
25750 }
25751
25752 push_rxbufs(card, skb);
25753 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25754 if (!atm_charge(vcc, skb->truesize))
25755 {
25756 push_rxbufs(card, skb);
25757 - atomic_inc(&vcc->stats->rx_drop);
25758 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25759 }
25760 else
25761 {
25762 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25763 ATM_SKB(skb)->vcc = vcc;
25764 __net_timestamp(skb);
25765 vcc->push(vcc, skb);
25766 - atomic_inc(&vcc->stats->rx);
25767 + atomic_inc_unchecked(&vcc->stats->rx);
25768 }
25769
25770 push_rxbufs(card, sb);
25771 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25772 if (hb == NULL)
25773 {
25774 printk("nicstar%d: Out of huge buffers.\n", card->index);
25775 - atomic_inc(&vcc->stats->rx_drop);
25776 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25777 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25778 NS_SKB(iovb)->iovcnt);
25779 vc->rx_iov = NULL;
25780 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25781 }
25782 else
25783 dev_kfree_skb_any(hb);
25784 - atomic_inc(&vcc->stats->rx_drop);
25785 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25786 }
25787 else
25788 {
25789 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25790 #endif /* NS_USE_DESTRUCTORS */
25791 __net_timestamp(hb);
25792 vcc->push(vcc, hb);
25793 - atomic_inc(&vcc->stats->rx);
25794 + atomic_inc_unchecked(&vcc->stats->rx);
25795 }
25796 }
25797
25798 diff -urNp linux-2.6.32.41/drivers/atm/solos-pci.c linux-2.6.32.41/drivers/atm/solos-pci.c
25799 --- linux-2.6.32.41/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25800 +++ linux-2.6.32.41/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25801 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25802 }
25803 atm_charge(vcc, skb->truesize);
25804 vcc->push(vcc, skb);
25805 - atomic_inc(&vcc->stats->rx);
25806 + atomic_inc_unchecked(&vcc->stats->rx);
25807 break;
25808
25809 case PKT_STATUS:
25810 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25811 char msg[500];
25812 char item[10];
25813
25814 + pax_track_stack();
25815 +
25816 len = buf->len;
25817 for (i = 0; i < len; i++){
25818 if(i % 8 == 0)
25819 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25820 vcc = SKB_CB(oldskb)->vcc;
25821
25822 if (vcc) {
25823 - atomic_inc(&vcc->stats->tx);
25824 + atomic_inc_unchecked(&vcc->stats->tx);
25825 solos_pop(vcc, oldskb);
25826 } else
25827 dev_kfree_skb_irq(oldskb);
25828 diff -urNp linux-2.6.32.41/drivers/atm/suni.c linux-2.6.32.41/drivers/atm/suni.c
25829 --- linux-2.6.32.41/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25830 +++ linux-2.6.32.41/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25831 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25832
25833
25834 #define ADD_LIMITED(s,v) \
25835 - atomic_add((v),&stats->s); \
25836 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25837 + atomic_add_unchecked((v),&stats->s); \
25838 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25839
25840
25841 static void suni_hz(unsigned long from_timer)
25842 diff -urNp linux-2.6.32.41/drivers/atm/uPD98402.c linux-2.6.32.41/drivers/atm/uPD98402.c
25843 --- linux-2.6.32.41/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25844 +++ linux-2.6.32.41/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25845 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25846 struct sonet_stats tmp;
25847 int error = 0;
25848
25849 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25850 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25851 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25852 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25853 if (zero && !error) {
25854 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25855
25856
25857 #define ADD_LIMITED(s,v) \
25858 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25859 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25860 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25861 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25862 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25863 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25864
25865
25866 static void stat_event(struct atm_dev *dev)
25867 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25868 if (reason & uPD98402_INT_PFM) stat_event(dev);
25869 if (reason & uPD98402_INT_PCO) {
25870 (void) GET(PCOCR); /* clear interrupt cause */
25871 - atomic_add(GET(HECCT),
25872 + atomic_add_unchecked(GET(HECCT),
25873 &PRIV(dev)->sonet_stats.uncorr_hcs);
25874 }
25875 if ((reason & uPD98402_INT_RFO) &&
25876 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25877 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25878 uPD98402_INT_LOS),PIMR); /* enable them */
25879 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25880 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25881 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25882 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25883 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25884 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25885 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25886 return 0;
25887 }
25888
25889 diff -urNp linux-2.6.32.41/drivers/atm/zatm.c linux-2.6.32.41/drivers/atm/zatm.c
25890 --- linux-2.6.32.41/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25891 +++ linux-2.6.32.41/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25892 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25893 }
25894 if (!size) {
25895 dev_kfree_skb_irq(skb);
25896 - if (vcc) atomic_inc(&vcc->stats->rx_err);
25897 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25898 continue;
25899 }
25900 if (!atm_charge(vcc,skb->truesize)) {
25901 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25902 skb->len = size;
25903 ATM_SKB(skb)->vcc = vcc;
25904 vcc->push(vcc,skb);
25905 - atomic_inc(&vcc->stats->rx);
25906 + atomic_inc_unchecked(&vcc->stats->rx);
25907 }
25908 zout(pos & 0xffff,MTA(mbx));
25909 #if 0 /* probably a stupid idea */
25910 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25911 skb_queue_head(&zatm_vcc->backlog,skb);
25912 break;
25913 }
25914 - atomic_inc(&vcc->stats->tx);
25915 + atomic_inc_unchecked(&vcc->stats->tx);
25916 wake_up(&zatm_vcc->tx_wait);
25917 }
25918
25919 diff -urNp linux-2.6.32.41/drivers/base/bus.c linux-2.6.32.41/drivers/base/bus.c
25920 --- linux-2.6.32.41/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
25921 +++ linux-2.6.32.41/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
25922 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
25923 return ret;
25924 }
25925
25926 -static struct sysfs_ops driver_sysfs_ops = {
25927 +static const struct sysfs_ops driver_sysfs_ops = {
25928 .show = drv_attr_show,
25929 .store = drv_attr_store,
25930 };
25931 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
25932 return ret;
25933 }
25934
25935 -static struct sysfs_ops bus_sysfs_ops = {
25936 +static const struct sysfs_ops bus_sysfs_ops = {
25937 .show = bus_attr_show,
25938 .store = bus_attr_store,
25939 };
25940 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
25941 return 0;
25942 }
25943
25944 -static struct kset_uevent_ops bus_uevent_ops = {
25945 +static const struct kset_uevent_ops bus_uevent_ops = {
25946 .filter = bus_uevent_filter,
25947 };
25948
25949 diff -urNp linux-2.6.32.41/drivers/base/class.c linux-2.6.32.41/drivers/base/class.c
25950 --- linux-2.6.32.41/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
25951 +++ linux-2.6.32.41/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
25952 @@ -63,7 +63,7 @@ static void class_release(struct kobject
25953 kfree(cp);
25954 }
25955
25956 -static struct sysfs_ops class_sysfs_ops = {
25957 +static const struct sysfs_ops class_sysfs_ops = {
25958 .show = class_attr_show,
25959 .store = class_attr_store,
25960 };
25961 diff -urNp linux-2.6.32.41/drivers/base/core.c linux-2.6.32.41/drivers/base/core.c
25962 --- linux-2.6.32.41/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
25963 +++ linux-2.6.32.41/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
25964 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
25965 return ret;
25966 }
25967
25968 -static struct sysfs_ops dev_sysfs_ops = {
25969 +static const struct sysfs_ops dev_sysfs_ops = {
25970 .show = dev_attr_show,
25971 .store = dev_attr_store,
25972 };
25973 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
25974 return retval;
25975 }
25976
25977 -static struct kset_uevent_ops device_uevent_ops = {
25978 +static const struct kset_uevent_ops device_uevent_ops = {
25979 .filter = dev_uevent_filter,
25980 .name = dev_uevent_name,
25981 .uevent = dev_uevent,
25982 diff -urNp linux-2.6.32.41/drivers/base/memory.c linux-2.6.32.41/drivers/base/memory.c
25983 --- linux-2.6.32.41/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
25984 +++ linux-2.6.32.41/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
25985 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
25986 return retval;
25987 }
25988
25989 -static struct kset_uevent_ops memory_uevent_ops = {
25990 +static const struct kset_uevent_ops memory_uevent_ops = {
25991 .name = memory_uevent_name,
25992 .uevent = memory_uevent,
25993 };
25994 diff -urNp linux-2.6.32.41/drivers/base/sys.c linux-2.6.32.41/drivers/base/sys.c
25995 --- linux-2.6.32.41/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
25996 +++ linux-2.6.32.41/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
25997 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
25998 return -EIO;
25999 }
26000
26001 -static struct sysfs_ops sysfs_ops = {
26002 +static const struct sysfs_ops sysfs_ops = {
26003 .show = sysdev_show,
26004 .store = sysdev_store,
26005 };
26006 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26007 return -EIO;
26008 }
26009
26010 -static struct sysfs_ops sysfs_class_ops = {
26011 +static const struct sysfs_ops sysfs_class_ops = {
26012 .show = sysdev_class_show,
26013 .store = sysdev_class_store,
26014 };
26015 diff -urNp linux-2.6.32.41/drivers/block/cciss.c linux-2.6.32.41/drivers/block/cciss.c
26016 --- linux-2.6.32.41/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26017 +++ linux-2.6.32.41/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26018 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26019 int err;
26020 u32 cp;
26021
26022 + memset(&arg64, 0, sizeof(arg64));
26023 +
26024 err = 0;
26025 err |=
26026 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26027 diff -urNp linux-2.6.32.41/drivers/block/cpqarray.c linux-2.6.32.41/drivers/block/cpqarray.c
26028 --- linux-2.6.32.41/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26029 +++ linux-2.6.32.41/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26030 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26031 struct scatterlist tmp_sg[SG_MAX];
26032 int i, dir, seg;
26033
26034 + pax_track_stack();
26035 +
26036 if (blk_queue_plugged(q))
26037 goto startio;
26038
26039 diff -urNp linux-2.6.32.41/drivers/block/DAC960.c linux-2.6.32.41/drivers/block/DAC960.c
26040 --- linux-2.6.32.41/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26041 +++ linux-2.6.32.41/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26042 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26043 unsigned long flags;
26044 int Channel, TargetID;
26045
26046 + pax_track_stack();
26047 +
26048 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26049 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26050 sizeof(DAC960_SCSI_Inquiry_T) +
26051 diff -urNp linux-2.6.32.41/drivers/block/nbd.c linux-2.6.32.41/drivers/block/nbd.c
26052 --- linux-2.6.32.41/drivers/block/nbd.c 2011-03-27 14:31:47.000000000 -0400
26053 +++ linux-2.6.32.41/drivers/block/nbd.c 2011-05-16 21:46:57.000000000 -0400
26054 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26055 struct kvec iov;
26056 sigset_t blocked, oldset;
26057
26058 + pax_track_stack();
26059 +
26060 if (unlikely(!sock)) {
26061 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26062 lo->disk->disk_name, (send ? "send" : "recv"));
26063 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26064 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26065 unsigned int cmd, unsigned long arg)
26066 {
26067 + pax_track_stack();
26068 +
26069 switch (cmd) {
26070 case NBD_DISCONNECT: {
26071 struct request sreq;
26072 diff -urNp linux-2.6.32.41/drivers/block/pktcdvd.c linux-2.6.32.41/drivers/block/pktcdvd.c
26073 --- linux-2.6.32.41/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26074 +++ linux-2.6.32.41/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26075 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26076 return len;
26077 }
26078
26079 -static struct sysfs_ops kobj_pkt_ops = {
26080 +static const struct sysfs_ops kobj_pkt_ops = {
26081 .show = kobj_pkt_show,
26082 .store = kobj_pkt_store
26083 };
26084 diff -urNp linux-2.6.32.41/drivers/char/agp/frontend.c linux-2.6.32.41/drivers/char/agp/frontend.c
26085 --- linux-2.6.32.41/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26086 +++ linux-2.6.32.41/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26087 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26088 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26089 return -EFAULT;
26090
26091 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26092 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26093 return -EFAULT;
26094
26095 client = agp_find_client_by_pid(reserve.pid);
26096 diff -urNp linux-2.6.32.41/drivers/char/briq_panel.c linux-2.6.32.41/drivers/char/briq_panel.c
26097 --- linux-2.6.32.41/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26098 +++ linux-2.6.32.41/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26099 @@ -10,6 +10,7 @@
26100 #include <linux/types.h>
26101 #include <linux/errno.h>
26102 #include <linux/tty.h>
26103 +#include <linux/mutex.h>
26104 #include <linux/timer.h>
26105 #include <linux/kernel.h>
26106 #include <linux/wait.h>
26107 @@ -36,6 +37,7 @@ static int vfd_is_open;
26108 static unsigned char vfd[40];
26109 static int vfd_cursor;
26110 static unsigned char ledpb, led;
26111 +static DEFINE_MUTEX(vfd_mutex);
26112
26113 static void update_vfd(void)
26114 {
26115 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26116 if (!vfd_is_open)
26117 return -EBUSY;
26118
26119 + mutex_lock(&vfd_mutex);
26120 for (;;) {
26121 char c;
26122 if (!indx)
26123 break;
26124 - if (get_user(c, buf))
26125 + if (get_user(c, buf)) {
26126 + mutex_unlock(&vfd_mutex);
26127 return -EFAULT;
26128 + }
26129 if (esc) {
26130 set_led(c);
26131 esc = 0;
26132 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26133 buf++;
26134 }
26135 update_vfd();
26136 + mutex_unlock(&vfd_mutex);
26137
26138 return len;
26139 }
26140 diff -urNp linux-2.6.32.41/drivers/char/genrtc.c linux-2.6.32.41/drivers/char/genrtc.c
26141 --- linux-2.6.32.41/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26142 +++ linux-2.6.32.41/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26143 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26144 switch (cmd) {
26145
26146 case RTC_PLL_GET:
26147 + memset(&pll, 0, sizeof(pll));
26148 if (get_rtc_pll(&pll))
26149 return -EINVAL;
26150 else
26151 diff -urNp linux-2.6.32.41/drivers/char/hpet.c linux-2.6.32.41/drivers/char/hpet.c
26152 --- linux-2.6.32.41/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26153 +++ linux-2.6.32.41/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26154 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26155 return 0;
26156 }
26157
26158 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26159 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26160
26161 static int
26162 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26163 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26164 }
26165
26166 static int
26167 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26168 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26169 {
26170 struct hpet_timer __iomem *timer;
26171 struct hpet __iomem *hpet;
26172 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26173 {
26174 struct hpet_info info;
26175
26176 + memset(&info, 0, sizeof(info));
26177 +
26178 if (devp->hd_ireqfreq)
26179 info.hi_ireqfreq =
26180 hpet_time_div(hpetp, devp->hd_ireqfreq);
26181 - else
26182 - info.hi_ireqfreq = 0;
26183 info.hi_flags =
26184 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26185 info.hi_hpet = hpetp->hp_which;
26186 diff -urNp linux-2.6.32.41/drivers/char/hvc_beat.c linux-2.6.32.41/drivers/char/hvc_beat.c
26187 --- linux-2.6.32.41/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26188 +++ linux-2.6.32.41/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26189 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26190 return cnt;
26191 }
26192
26193 -static struct hv_ops hvc_beat_get_put_ops = {
26194 +static const struct hv_ops hvc_beat_get_put_ops = {
26195 .get_chars = hvc_beat_get_chars,
26196 .put_chars = hvc_beat_put_chars,
26197 };
26198 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.c linux-2.6.32.41/drivers/char/hvc_console.c
26199 --- linux-2.6.32.41/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26200 +++ linux-2.6.32.41/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26201 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26202 * console interfaces but can still be used as a tty device. This has to be
26203 * static because kmalloc will not work during early console init.
26204 */
26205 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26206 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26207 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26208 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26209
26210 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26211 * vty adapters do NOT get an hvc_instantiate() callback since they
26212 * appear after early console init.
26213 */
26214 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26215 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26216 {
26217 struct hvc_struct *hp;
26218
26219 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26220 };
26221
26222 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26223 - struct hv_ops *ops, int outbuf_size)
26224 + const struct hv_ops *ops, int outbuf_size)
26225 {
26226 struct hvc_struct *hp;
26227 int i;
26228 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.h linux-2.6.32.41/drivers/char/hvc_console.h
26229 --- linux-2.6.32.41/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26230 +++ linux-2.6.32.41/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26231 @@ -55,7 +55,7 @@ struct hvc_struct {
26232 int outbuf_size;
26233 int n_outbuf;
26234 uint32_t vtermno;
26235 - struct hv_ops *ops;
26236 + const struct hv_ops *ops;
26237 int irq_requested;
26238 int data;
26239 struct winsize ws;
26240 @@ -76,11 +76,11 @@ struct hv_ops {
26241 };
26242
26243 /* Register a vterm and a slot index for use as a console (console_init) */
26244 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26245 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26246
26247 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26248 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26249 - struct hv_ops *ops, int outbuf_size);
26250 + const struct hv_ops *ops, int outbuf_size);
26251 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26252 extern int hvc_remove(struct hvc_struct *hp);
26253
26254 diff -urNp linux-2.6.32.41/drivers/char/hvc_iseries.c linux-2.6.32.41/drivers/char/hvc_iseries.c
26255 --- linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26256 +++ linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26257 @@ -197,7 +197,7 @@ done:
26258 return sent;
26259 }
26260
26261 -static struct hv_ops hvc_get_put_ops = {
26262 +static const struct hv_ops hvc_get_put_ops = {
26263 .get_chars = get_chars,
26264 .put_chars = put_chars,
26265 .notifier_add = notifier_add_irq,
26266 diff -urNp linux-2.6.32.41/drivers/char/hvc_iucv.c linux-2.6.32.41/drivers/char/hvc_iucv.c
26267 --- linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26268 +++ linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26269 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26270
26271
26272 /* HVC operations */
26273 -static struct hv_ops hvc_iucv_ops = {
26274 +static const struct hv_ops hvc_iucv_ops = {
26275 .get_chars = hvc_iucv_get_chars,
26276 .put_chars = hvc_iucv_put_chars,
26277 .notifier_add = hvc_iucv_notifier_add,
26278 diff -urNp linux-2.6.32.41/drivers/char/hvc_rtas.c linux-2.6.32.41/drivers/char/hvc_rtas.c
26279 --- linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26280 +++ linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26281 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26282 return i;
26283 }
26284
26285 -static struct hv_ops hvc_rtas_get_put_ops = {
26286 +static const struct hv_ops hvc_rtas_get_put_ops = {
26287 .get_chars = hvc_rtas_read_console,
26288 .put_chars = hvc_rtas_write_console,
26289 };
26290 diff -urNp linux-2.6.32.41/drivers/char/hvcs.c linux-2.6.32.41/drivers/char/hvcs.c
26291 --- linux-2.6.32.41/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26292 +++ linux-2.6.32.41/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26293 @@ -82,6 +82,7 @@
26294 #include <asm/hvcserver.h>
26295 #include <asm/uaccess.h>
26296 #include <asm/vio.h>
26297 +#include <asm/local.h>
26298
26299 /*
26300 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26301 @@ -269,7 +270,7 @@ struct hvcs_struct {
26302 unsigned int index;
26303
26304 struct tty_struct *tty;
26305 - int open_count;
26306 + local_t open_count;
26307
26308 /*
26309 * Used to tell the driver kernel_thread what operations need to take
26310 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26311
26312 spin_lock_irqsave(&hvcsd->lock, flags);
26313
26314 - if (hvcsd->open_count > 0) {
26315 + if (local_read(&hvcsd->open_count) > 0) {
26316 spin_unlock_irqrestore(&hvcsd->lock, flags);
26317 printk(KERN_INFO "HVCS: vterm state unchanged. "
26318 "The hvcs device node is still in use.\n");
26319 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26320 if ((retval = hvcs_partner_connect(hvcsd)))
26321 goto error_release;
26322
26323 - hvcsd->open_count = 1;
26324 + local_set(&hvcsd->open_count, 1);
26325 hvcsd->tty = tty;
26326 tty->driver_data = hvcsd;
26327
26328 @@ -1169,7 +1170,7 @@ fast_open:
26329
26330 spin_lock_irqsave(&hvcsd->lock, flags);
26331 kref_get(&hvcsd->kref);
26332 - hvcsd->open_count++;
26333 + local_inc(&hvcsd->open_count);
26334 hvcsd->todo_mask |= HVCS_SCHED_READ;
26335 spin_unlock_irqrestore(&hvcsd->lock, flags);
26336
26337 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26338 hvcsd = tty->driver_data;
26339
26340 spin_lock_irqsave(&hvcsd->lock, flags);
26341 - if (--hvcsd->open_count == 0) {
26342 + if (local_dec_and_test(&hvcsd->open_count)) {
26343
26344 vio_disable_interrupts(hvcsd->vdev);
26345
26346 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26347 free_irq(irq, hvcsd);
26348 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26349 return;
26350 - } else if (hvcsd->open_count < 0) {
26351 + } else if (local_read(&hvcsd->open_count) < 0) {
26352 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26353 " is missmanaged.\n",
26354 - hvcsd->vdev->unit_address, hvcsd->open_count);
26355 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26356 }
26357
26358 spin_unlock_irqrestore(&hvcsd->lock, flags);
26359 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26360
26361 spin_lock_irqsave(&hvcsd->lock, flags);
26362 /* Preserve this so that we know how many kref refs to put */
26363 - temp_open_count = hvcsd->open_count;
26364 + temp_open_count = local_read(&hvcsd->open_count);
26365
26366 /*
26367 * Don't kref put inside the spinlock because the destruction
26368 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26369 hvcsd->tty->driver_data = NULL;
26370 hvcsd->tty = NULL;
26371
26372 - hvcsd->open_count = 0;
26373 + local_set(&hvcsd->open_count, 0);
26374
26375 /* This will drop any buffered data on the floor which is OK in a hangup
26376 * scenario. */
26377 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26378 * the middle of a write operation? This is a crummy place to do this
26379 * but we want to keep it all in the spinlock.
26380 */
26381 - if (hvcsd->open_count <= 0) {
26382 + if (local_read(&hvcsd->open_count) <= 0) {
26383 spin_unlock_irqrestore(&hvcsd->lock, flags);
26384 return -ENODEV;
26385 }
26386 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26387 {
26388 struct hvcs_struct *hvcsd = tty->driver_data;
26389
26390 - if (!hvcsd || hvcsd->open_count <= 0)
26391 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26392 return 0;
26393
26394 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
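Note (not part of the patch): the hvcs.c hunks above replace a plain int open_count with the kernel's local_t type and its local_inc()/local_dec_and_test()/local_read() helpers, so the open count is manipulated through dedicated atomic primitives instead of bare integer arithmetic. As a rough standalone illustration of the counting pattern only, the userspace sketch below models local_t with a C11 atomic; every name in it (demo_port, demo_open, demo_close) is hypothetical and nothing here is kernel API.

/* Userspace analogue of the open_count conversion above (illustration
 * only): local_t/local_inc/local_dec_and_test are modelled with C11
 * atomics. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_port {
        atomic_int open_count;          /* stands in for local_t */
};

static void demo_open(struct demo_port *p)
{
        atomic_fetch_add(&p->open_count, 1);    /* ~ local_inc() */
}

static void demo_close(struct demo_port *p)
{
        /* ~ local_dec_and_test(): true when the count reaches zero */
        if (atomic_fetch_sub(&p->open_count, 1) == 1)
                printf("last close: release resources here\n");
}

int main(void)
{
        struct demo_port p = { .open_count = 0 };

        demo_open(&p);
        demo_open(&p);
        demo_close(&p);
        demo_close(&p);         /* prints the "last close" message */
        return 0;
}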
26395 diff -urNp linux-2.6.32.41/drivers/char/hvc_udbg.c linux-2.6.32.41/drivers/char/hvc_udbg.c
26396 --- linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26397 +++ linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26398 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26399 return i;
26400 }
26401
26402 -static struct hv_ops hvc_udbg_ops = {
26403 +static const struct hv_ops hvc_udbg_ops = {
26404 .get_chars = hvc_udbg_get,
26405 .put_chars = hvc_udbg_put,
26406 };
26407 diff -urNp linux-2.6.32.41/drivers/char/hvc_vio.c linux-2.6.32.41/drivers/char/hvc_vio.c
26408 --- linux-2.6.32.41/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26409 +++ linux-2.6.32.41/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26410 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26411 return got;
26412 }
26413
26414 -static struct hv_ops hvc_get_put_ops = {
26415 +static const struct hv_ops hvc_get_put_ops = {
26416 .get_chars = filtered_get_chars,
26417 .put_chars = hvc_put_chars,
26418 .notifier_add = notifier_add_irq,
26419 diff -urNp linux-2.6.32.41/drivers/char/hvc_xen.c linux-2.6.32.41/drivers/char/hvc_xen.c
26420 --- linux-2.6.32.41/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26421 +++ linux-2.6.32.41/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26422 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26423 return recv;
26424 }
26425
26426 -static struct hv_ops hvc_ops = {
26427 +static const struct hv_ops hvc_ops = {
26428 .get_chars = read_console,
26429 .put_chars = write_console,
26430 .notifier_add = notifier_add_irq,
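Note (not part of the patch): the hvc_iseries, hvc_iucv, hvc_rtas, hvc_udbg, hvc_vio and hvc_xen hunks in this stretch all add const to each driver's static struct hv_ops, so the function-pointer table can be placed in read-only memory and cannot be overwritten at run time. A minimal standalone sketch of that constified ops-table pattern follows; the names (demo_ops, demo_get, demo_put, demo_console_ops) and the struct layout are hypothetical, invented for illustration.

/* Standalone sketch of a constified ops table (illustration only). */
#include <stddef.h>
#include <stdio.h>

struct demo_ops {
        int (*get_chars)(char *buf, size_t len);
        int (*put_chars)(const char *buf, size_t len);
};

static int demo_get(char *buf, size_t len)
{
        (void)buf; (void)len;
        return 0;                       /* nothing to read in this sketch */
}

static int demo_put(const char *buf, size_t len)
{
        return (int)fwrite(buf, 1, len, stdout);
}

/* const: the table lives in read-only data, so its pointers cannot be
 * redirected after initialization */
static const struct demo_ops demo_console_ops = {
        .get_chars = demo_get,
        .put_chars = demo_put,
};

int main(void)
{
        return demo_console_ops.put_chars("hello\n", 6) == 6 ? 0 : 1;
}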
26431 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c
26432 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26433 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26434 @@ -414,7 +414,7 @@ struct ipmi_smi {
26435 struct proc_dir_entry *proc_dir;
26436 char proc_dir_name[10];
26437
26438 - atomic_t stats[IPMI_NUM_STATS];
26439 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26440
26441 /*
26442 * run_to_completion duplicate of smb_info, smi_info
26443 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26444
26445
26446 #define ipmi_inc_stat(intf, stat) \
26447 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26448 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26449 #define ipmi_get_stat(intf, stat) \
26450 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26451 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26452
26453 static int is_lan_addr(struct ipmi_addr *addr)
26454 {
26455 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26456 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26457 init_waitqueue_head(&intf->waitq);
26458 for (i = 0; i < IPMI_NUM_STATS; i++)
26459 - atomic_set(&intf->stats[i], 0);
26460 + atomic_set_unchecked(&intf->stats[i], 0);
26461
26462 intf->proc_dir = NULL;
26463
26464 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26465 struct ipmi_smi_msg smi_msg;
26466 struct ipmi_recv_msg recv_msg;
26467
26468 + pax_track_stack();
26469 +
26470 si = (struct ipmi_system_interface_addr *) &addr;
26471 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26472 si->channel = IPMI_BMC_CHANNEL;
26473 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c
26474 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26475 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26476 @@ -277,7 +277,7 @@ struct smi_info {
26477 unsigned char slave_addr;
26478
26479 /* Counters and things for the proc filesystem. */
26480 - atomic_t stats[SI_NUM_STATS];
26481 + atomic_unchecked_t stats[SI_NUM_STATS];
26482
26483 struct task_struct *thread;
26484
26485 @@ -285,9 +285,9 @@ struct smi_info {
26486 };
26487
26488 #define smi_inc_stat(smi, stat) \
26489 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26490 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26491 #define smi_get_stat(smi, stat) \
26492 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26493 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26494
26495 #define SI_MAX_PARMS 4
26496
26497 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26498 atomic_set(&new_smi->req_events, 0);
26499 new_smi->run_to_completion = 0;
26500 for (i = 0; i < SI_NUM_STATS; i++)
26501 - atomic_set(&new_smi->stats[i], 0);
26502 + atomic_set_unchecked(&new_smi->stats[i], 0);
26503
26504 new_smi->interrupt_disabled = 0;
26505 atomic_set(&new_smi->stop_operation, 0);
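Note (not part of the patch): the two ipmi hunks above move pure statistics counters from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT hardening applied elsewhere in this patch, ordinary atomic_t increments are overflow-checked because they usually guard object lifetimes; counters that are allowed to wrap are switched to the unchecked variant so they do not trip that check. The sketch below is only a userspace analogy of that split, with hypothetical names and plain (non-atomic) integers standing in for the kernel types.

/* Why stats counters are separated from reference counts (illustration
 * only): a refcount that overflows is a bug worth refusing, while a
 * statistics counter may simply wrap. */
#include <limits.h>
#include <stdio.h>

/* "checked" increment: refuse to wrap past INT_MAX (stand-in for the
 * overflow trap that instrumented atomic_t provides) */
static int checked_inc(int *v)
{
        if (*v == INT_MAX)
                return -1;      /* would overflow: report instead of wrapping */
        (*v)++;
        return 0;
}

/* "unchecked" increment: wrapping is acceptable for a pure counter */
static void unchecked_inc(unsigned int *v)
{
        (*v)++;                 /* unsigned wrap-around is well defined */
}

int main(void)
{
        int refcount = INT_MAX;
        unsigned int stat = UINT_MAX;

        if (checked_inc(&refcount) < 0)
                puts("refcount overflow caught");
        unchecked_inc(&stat);
        printf("stat wrapped to %u\n", stat);
        return 0;
}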
26506 diff -urNp linux-2.6.32.41/drivers/char/istallion.c linux-2.6.32.41/drivers/char/istallion.c
26507 --- linux-2.6.32.41/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26508 +++ linux-2.6.32.41/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26509 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26510 * re-used for each stats call.
26511 */
26512 static comstats_t stli_comstats;
26513 -static combrd_t stli_brdstats;
26514 static struct asystats stli_cdkstats;
26515
26516 /*****************************************************************************/
26517 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26518 {
26519 struct stlibrd *brdp;
26520 unsigned int i;
26521 + combrd_t stli_brdstats;
26522
26523 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26524 return -EFAULT;
26525 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26526 struct stliport stli_dummyport;
26527 struct stliport *portp;
26528
26529 + pax_track_stack();
26530 +
26531 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26532 return -EFAULT;
26533 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26534 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26535 struct stlibrd stli_dummybrd;
26536 struct stlibrd *brdp;
26537
26538 + pax_track_stack();
26539 +
26540 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26541 return -EFAULT;
26542 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
26543 diff -urNp linux-2.6.32.41/drivers/char/Kconfig linux-2.6.32.41/drivers/char/Kconfig
26544 --- linux-2.6.32.41/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26545 +++ linux-2.6.32.41/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26546 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26547
26548 config DEVKMEM
26549 bool "/dev/kmem virtual device support"
26550 - default y
26551 + default n
26552 + depends on !GRKERNSEC_KMEM
26553 help
26554 Say Y here if you want to support the /dev/kmem device. The
26555 /dev/kmem device is rarely used, but can be used for certain
26556 @@ -1114,6 +1115,7 @@ config DEVPORT
26557 bool
26558 depends on !M68K
26559 depends on ISA || PCI
26560 + depends on !GRKERNSEC_KMEM
26561 default y
26562
26563 source "drivers/s390/char/Kconfig"
26564 diff -urNp linux-2.6.32.41/drivers/char/keyboard.c linux-2.6.32.41/drivers/char/keyboard.c
26565 --- linux-2.6.32.41/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26566 +++ linux-2.6.32.41/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26567 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26568 kbd->kbdmode == VC_MEDIUMRAW) &&
26569 value != KVAL(K_SAK))
26570 return; /* SAK is allowed even in raw mode */
26571 +
26572 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26573 + {
26574 + void *func = fn_handler[value];
26575 + if (func == fn_show_state || func == fn_show_ptregs ||
26576 + func == fn_show_mem)
26577 + return;
26578 + }
26579 +#endif
26580 +
26581 fn_handler[value](vc);
26582 }
26583
26584 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26585 .evbit = { BIT_MASK(EV_SND) },
26586 },
26587
26588 - { }, /* Terminating entry */
26589 + { 0 }, /* Terminating entry */
26590 };
26591
26592 MODULE_DEVICE_TABLE(input, kbd_ids);
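Note (not part of the patch): the k_spec() hunk above looks up the keyboard handler in fn_handler[] first and refuses to call the show-state/show-registers/show-memory handlers when the proc-restriction options are enabled, since those handlers dump kernel information to the console. The sketch below shows only the general shape of filtering an indirect call through a small deny-list; all names are hypothetical.

/* Deny-listing entries of a function-pointer dispatch table before the
 * indirect call is made (illustration only). */
#include <stdio.h>

static void show_state(void)  { puts("state"); }
static void show_mem(void)    { puts("mem");   }
static void do_null(void)     { }

static void (*fn_handler[])(void) = { do_null, show_state, show_mem };

static void dispatch(unsigned int value, int restricted)
{
        if (value >= sizeof(fn_handler) / sizeof(fn_handler[0]))
                return;
        if (restricted) {
                void (*func)(void) = fn_handler[value];

                if (func == show_state || func == show_mem)
                        return;         /* information-dumping handlers refused */
        }
        fn_handler[value]();
}

int main(void)
{
        dispatch(1, 0);         /* prints "state" */
        dispatch(1, 1);         /* filtered: prints nothing */
        dispatch(2, 1);         /* filtered: prints nothing */
        return 0;
}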
26593 diff -urNp linux-2.6.32.41/drivers/char/mem.c linux-2.6.32.41/drivers/char/mem.c
26594 --- linux-2.6.32.41/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26595 +++ linux-2.6.32.41/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26596 @@ -18,6 +18,7 @@
26597 #include <linux/raw.h>
26598 #include <linux/tty.h>
26599 #include <linux/capability.h>
26600 +#include <linux/security.h>
26601 #include <linux/ptrace.h>
26602 #include <linux/device.h>
26603 #include <linux/highmem.h>
26604 @@ -35,6 +36,10 @@
26605 # include <linux/efi.h>
26606 #endif
26607
26608 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26609 +extern struct file_operations grsec_fops;
26610 +#endif
26611 +
26612 static inline unsigned long size_inside_page(unsigned long start,
26613 unsigned long size)
26614 {
26615 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26616
26617 while (cursor < to) {
26618 if (!devmem_is_allowed(pfn)) {
26619 +#ifdef CONFIG_GRKERNSEC_KMEM
26620 + gr_handle_mem_readwrite(from, to);
26621 +#else
26622 printk(KERN_INFO
26623 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26624 current->comm, from, to);
26625 +#endif
26626 return 0;
26627 }
26628 cursor += PAGE_SIZE;
26629 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26630 }
26631 return 1;
26632 }
26633 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26634 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26635 +{
26636 + return 0;
26637 +}
26638 #else
26639 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26640 {
26641 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26642 #endif
26643
26644 while (count > 0) {
26645 + char *temp;
26646 +
26647 /*
26648 * Handle first page in case it's not aligned
26649 */
26650 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26651 if (!ptr)
26652 return -EFAULT;
26653
26654 - if (copy_to_user(buf, ptr, sz)) {
26655 +#ifdef CONFIG_PAX_USERCOPY
26656 + temp = kmalloc(sz, GFP_KERNEL);
26657 + if (!temp) {
26658 + unxlate_dev_mem_ptr(p, ptr);
26659 + return -ENOMEM;
26660 + }
26661 + memcpy(temp, ptr, sz);
26662 +#else
26663 + temp = ptr;
26664 +#endif
26665 +
26666 + if (copy_to_user(buf, temp, sz)) {
26667 +
26668 +#ifdef CONFIG_PAX_USERCOPY
26669 + kfree(temp);
26670 +#endif
26671 +
26672 unxlate_dev_mem_ptr(p, ptr);
26673 return -EFAULT;
26674 }
26675
26676 +#ifdef CONFIG_PAX_USERCOPY
26677 + kfree(temp);
26678 +#endif
26679 +
26680 unxlate_dev_mem_ptr(p, ptr);
26681
26682 buf += sz;
26683 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26684 size_t count, loff_t *ppos)
26685 {
26686 unsigned long p = *ppos;
26687 - ssize_t low_count, read, sz;
26688 + ssize_t low_count, read, sz, err = 0;
26689 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26690 - int err = 0;
26691
26692 read = 0;
26693 if (p < (unsigned long) high_memory) {
26694 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26695 }
26696 #endif
26697 while (low_count > 0) {
26698 + char *temp;
26699 +
26700 sz = size_inside_page(p, low_count);
26701
26702 /*
26703 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26704 */
26705 kbuf = xlate_dev_kmem_ptr((char *)p);
26706
26707 - if (copy_to_user(buf, kbuf, sz))
26708 +#ifdef CONFIG_PAX_USERCOPY
26709 + temp = kmalloc(sz, GFP_KERNEL);
26710 + if (!temp)
26711 + return -ENOMEM;
26712 + memcpy(temp, kbuf, sz);
26713 +#else
26714 + temp = kbuf;
26715 +#endif
26716 +
26717 + err = copy_to_user(buf, temp, sz);
26718 +
26719 +#ifdef CONFIG_PAX_USERCOPY
26720 + kfree(temp);
26721 +#endif
26722 +
26723 + if (err)
26724 return -EFAULT;
26725 buf += sz;
26726 p += sz;
26727 @@ -889,6 +941,9 @@ static const struct memdev {
26728 #ifdef CONFIG_CRASH_DUMP
26729 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26730 #endif
26731 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26732 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26733 +#endif
26734 };
26735
26736 static int memory_open(struct inode *inode, struct file *filp)
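Note (not part of the patch): under CONFIG_PAX_USERCOPY the read_mem()/read_kmem() hunks above stage the data in a freshly kmalloc'd buffer and copy_to_user() from that buffer rather than from the raw mapping, so the usercopy size checks operate on a heap object of known size. The userspace sketch below captures only the bounce-buffer idea; the names are hypothetical and malloc/memcpy stand in for the kernel allocator and copy routines.

/* Bounce-buffer copy (illustration only): the length is validated
 * against a staging object of known size before any data is handed out. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_via_bounce(char *dst, const char *src, size_t len, size_t max_chunk)
{
        char *temp;

        if (len > max_chunk)            /* length check against the staging object */
                return -1;

        temp = malloc(max_chunk);       /* staging buffer of known size */
        if (!temp)
                return -1;

        memcpy(temp, src, len);         /* stage */
        memcpy(dst, temp, len);         /* hand out only from the staging copy */
        free(temp);
        return 0;
}

int main(void)
{
        const char secret[] = "0123456789abcdef";
        char out[8] = { 0 };

        if (copy_via_bounce(out, secret, sizeof(out) - 1, 64) == 0)
                printf("copied: %s\n", out);
        return 0;
}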
26737 diff -urNp linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c
26738 --- linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26739 +++ linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26740 @@ -29,6 +29,7 @@
26741 #include <linux/tty_driver.h>
26742 #include <linux/tty_flip.h>
26743 #include <linux/uaccess.h>
26744 +#include <asm/local.h>
26745
26746 #include "tty.h"
26747 #include "network.h"
26748 @@ -51,7 +52,7 @@ struct ipw_tty {
26749 int tty_type;
26750 struct ipw_network *network;
26751 struct tty_struct *linux_tty;
26752 - int open_count;
26753 + local_t open_count;
26754 unsigned int control_lines;
26755 struct mutex ipw_tty_mutex;
26756 int tx_bytes_queued;
26757 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26758 mutex_unlock(&tty->ipw_tty_mutex);
26759 return -ENODEV;
26760 }
26761 - if (tty->open_count == 0)
26762 + if (local_read(&tty->open_count) == 0)
26763 tty->tx_bytes_queued = 0;
26764
26765 - tty->open_count++;
26766 + local_inc(&tty->open_count);
26767
26768 tty->linux_tty = linux_tty;
26769 linux_tty->driver_data = tty;
26770 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26771
26772 static void do_ipw_close(struct ipw_tty *tty)
26773 {
26774 - tty->open_count--;
26775 -
26776 - if (tty->open_count == 0) {
26777 + if (local_dec_return(&tty->open_count) == 0) {
26778 struct tty_struct *linux_tty = tty->linux_tty;
26779
26780 if (linux_tty != NULL) {
26781 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26782 return;
26783
26784 mutex_lock(&tty->ipw_tty_mutex);
26785 - if (tty->open_count == 0) {
26786 + if (local_read(&tty->open_count) == 0) {
26787 mutex_unlock(&tty->ipw_tty_mutex);
26788 return;
26789 }
26790 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26791 return;
26792 }
26793
26794 - if (!tty->open_count) {
26795 + if (!local_read(&tty->open_count)) {
26796 mutex_unlock(&tty->ipw_tty_mutex);
26797 return;
26798 }
26799 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26800 return -ENODEV;
26801
26802 mutex_lock(&tty->ipw_tty_mutex);
26803 - if (!tty->open_count) {
26804 + if (!local_read(&tty->open_count)) {
26805 mutex_unlock(&tty->ipw_tty_mutex);
26806 return -EINVAL;
26807 }
26808 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26809 if (!tty)
26810 return -ENODEV;
26811
26812 - if (!tty->open_count)
26813 + if (!local_read(&tty->open_count))
26814 return -EINVAL;
26815
26816 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26817 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26818 if (!tty)
26819 return 0;
26820
26821 - if (!tty->open_count)
26822 + if (!local_read(&tty->open_count))
26823 return 0;
26824
26825 return tty->tx_bytes_queued;
26826 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26827 if (!tty)
26828 return -ENODEV;
26829
26830 - if (!tty->open_count)
26831 + if (!local_read(&tty->open_count))
26832 return -EINVAL;
26833
26834 return get_control_lines(tty);
26835 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26836 if (!tty)
26837 return -ENODEV;
26838
26839 - if (!tty->open_count)
26840 + if (!local_read(&tty->open_count))
26841 return -EINVAL;
26842
26843 return set_control_lines(tty, set, clear);
26844 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26845 if (!tty)
26846 return -ENODEV;
26847
26848 - if (!tty->open_count)
26849 + if (!local_read(&tty->open_count))
26850 return -EINVAL;
26851
26852 /* FIXME: Exactly how is the tty object locked here .. */
26853 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26854 against a parallel ioctl etc */
26855 mutex_lock(&ttyj->ipw_tty_mutex);
26856 }
26857 - while (ttyj->open_count)
26858 + while (local_read(&ttyj->open_count))
26859 do_ipw_close(ttyj);
26860 ipwireless_disassociate_network_ttys(network,
26861 ttyj->channel_idx);
26862 diff -urNp linux-2.6.32.41/drivers/char/pty.c linux-2.6.32.41/drivers/char/pty.c
26863 --- linux-2.6.32.41/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26864 +++ linux-2.6.32.41/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26865 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26866 return ret;
26867 }
26868
26869 -static struct file_operations ptmx_fops;
26870 +static const struct file_operations ptmx_fops = {
26871 + .llseek = no_llseek,
26872 + .read = tty_read,
26873 + .write = tty_write,
26874 + .poll = tty_poll,
26875 + .unlocked_ioctl = tty_ioctl,
26876 + .compat_ioctl = tty_compat_ioctl,
26877 + .open = ptmx_open,
26878 + .release = tty_release,
26879 + .fasync = tty_fasync,
26880 +};
26881 +
26882
26883 static void __init unix98_pty_init(void)
26884 {
26885 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26886 register_sysctl_table(pty_root_table);
26887
26888 /* Now create the /dev/ptmx special device */
26889 - tty_default_fops(&ptmx_fops);
26890 - ptmx_fops.open = ptmx_open;
26891 -
26892 cdev_init(&ptmx_cdev, &ptmx_fops);
26893 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26894 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
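Note (not part of the patch): rather than copying the default tty file_operations at boot via tty_default_fops() and then patching .open at run time, the hunk above builds ptmx_fops as a compile-time designated initializer (relying on the tty_io.c hunks further down, which export tty_read, tty_write, tty_poll, tty_ioctl, tty_compat_ioctl, tty_release and tty_fasync), and that is what allows it to be declared const. The sketch below shows the same before/after idea in miniature, with hypothetical names.

/* Compile-time initialization instead of "copy a template, then patch
 * one member at init time" (illustration only). */
#include <stdio.h>

struct demo_fops {
        int (*open)(void);
        int (*release)(void);
};

static int generic_open(void)    { return puts("generic open"); }
static int special_open(void)    { return puts("special open"); }
static int generic_release(void) { return puts("generic release"); }

/* before: a mutable copy patched at init time
 *     static struct demo_fops ptmx_like;
 *     ptmx_like = generic_template;
 *     ptmx_like.open = special_open;
 * after: everything is known at compile time, so the table can be const */
static const struct demo_fops ptmx_like = {
        .open    = special_open,
        .release = generic_release,
};

int main(void)
{
        (void)generic_open;     /* kept only to mirror the template it replaces */
        ptmx_like.open();
        ptmx_like.release();
        return 0;
}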
26895 diff -urNp linux-2.6.32.41/drivers/char/random.c linux-2.6.32.41/drivers/char/random.c
26896 --- linux-2.6.32.41/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26897 +++ linux-2.6.32.41/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26898 @@ -254,8 +254,13 @@
26899 /*
26900 * Configuration information
26901 */
26902 +#ifdef CONFIG_GRKERNSEC_RANDNET
26903 +#define INPUT_POOL_WORDS 512
26904 +#define OUTPUT_POOL_WORDS 128
26905 +#else
26906 #define INPUT_POOL_WORDS 128
26907 #define OUTPUT_POOL_WORDS 32
26908 +#endif
26909 #define SEC_XFER_SIZE 512
26910
26911 /*
26912 @@ -292,10 +297,17 @@ static struct poolinfo {
26913 int poolwords;
26914 int tap1, tap2, tap3, tap4, tap5;
26915 } poolinfo_table[] = {
26916 +#ifdef CONFIG_GRKERNSEC_RANDNET
26917 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
26918 + { 512, 411, 308, 208, 104, 1 },
26919 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
26920 + { 128, 103, 76, 51, 25, 1 },
26921 +#else
26922 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
26923 { 128, 103, 76, 51, 25, 1 },
26924 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
26925 { 32, 26, 20, 14, 7, 1 },
26926 +#endif
26927 #if 0
26928 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
26929 { 2048, 1638, 1231, 819, 411, 1 },
26930 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
26931 #include <linux/sysctl.h>
26932
26933 static int min_read_thresh = 8, min_write_thresh;
26934 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
26935 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
26936 static int max_write_thresh = INPUT_POOL_WORDS * 32;
26937 static char sysctl_bootid[16];
26938
26939 diff -urNp linux-2.6.32.41/drivers/char/rocket.c linux-2.6.32.41/drivers/char/rocket.c
26940 --- linux-2.6.32.41/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
26941 +++ linux-2.6.32.41/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
26942 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
26943 struct rocket_ports tmp;
26944 int board;
26945
26946 + pax_track_stack();
26947 +
26948 if (!retports)
26949 return -EFAULT;
26950 memset(&tmp, 0, sizeof (tmp));
26951 diff -urNp linux-2.6.32.41/drivers/char/sonypi.c linux-2.6.32.41/drivers/char/sonypi.c
26952 --- linux-2.6.32.41/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
26953 +++ linux-2.6.32.41/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
26954 @@ -55,6 +55,7 @@
26955 #include <asm/uaccess.h>
26956 #include <asm/io.h>
26957 #include <asm/system.h>
26958 +#include <asm/local.h>
26959
26960 #include <linux/sonypi.h>
26961
26962 @@ -491,7 +492,7 @@ static struct sonypi_device {
26963 spinlock_t fifo_lock;
26964 wait_queue_head_t fifo_proc_list;
26965 struct fasync_struct *fifo_async;
26966 - int open_count;
26967 + local_t open_count;
26968 int model;
26969 struct input_dev *input_jog_dev;
26970 struct input_dev *input_key_dev;
26971 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
26972 static int sonypi_misc_release(struct inode *inode, struct file *file)
26973 {
26974 mutex_lock(&sonypi_device.lock);
26975 - sonypi_device.open_count--;
26976 + local_dec(&sonypi_device.open_count);
26977 mutex_unlock(&sonypi_device.lock);
26978 return 0;
26979 }
26980 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
26981 lock_kernel();
26982 mutex_lock(&sonypi_device.lock);
26983 /* Flush input queue on first open */
26984 - if (!sonypi_device.open_count)
26985 + if (!local_read(&sonypi_device.open_count))
26986 kfifo_reset(sonypi_device.fifo);
26987 - sonypi_device.open_count++;
26988 + local_inc(&sonypi_device.open_count);
26989 mutex_unlock(&sonypi_device.lock);
26990 unlock_kernel();
26991 return 0;
26992 diff -urNp linux-2.6.32.41/drivers/char/stallion.c linux-2.6.32.41/drivers/char/stallion.c
26993 --- linux-2.6.32.41/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
26994 +++ linux-2.6.32.41/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
26995 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
26996 struct stlport stl_dummyport;
26997 struct stlport *portp;
26998
26999 + pax_track_stack();
27000 +
27001 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27002 return -EFAULT;
27003 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27004 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm_bios.c linux-2.6.32.41/drivers/char/tpm/tpm_bios.c
27005 --- linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27006 +++ linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27007 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27008 event = addr;
27009
27010 if ((event->event_type == 0 && event->event_size == 0) ||
27011 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27012 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27013 return NULL;
27014
27015 return addr;
27016 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27017 return NULL;
27018
27019 if ((event->event_type == 0 && event->event_size == 0) ||
27020 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27021 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27022 return NULL;
27023
27024 (*pos)++;
27025 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27026 int i;
27027
27028 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27029 - seq_putc(m, data[i]);
27030 + if (!seq_putc(m, data[i]))
27031 + return -EFAULT;
27032
27033 return 0;
27034 }
27035 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27036 log->bios_event_log_end = log->bios_event_log + len;
27037
27038 virt = acpi_os_map_memory(start, len);
27039 + if (!virt) {
27040 + kfree(log->bios_event_log);
27041 + log->bios_event_log = NULL;
27042 + return -EFAULT;
27043 + }
27044
27045 memcpy(log->bios_event_log, virt, len);
27046
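Note (not part of the patch): the two measurement-iterator hunks above rewrite the bounds test so that an attacker-controlled event_size is compared against the space remaining before limit, instead of being added to the current position, where a large value could wrap the sum past the check. The sketch below illustrates that rearrangement in plain C; the names and the extra header-room guard are mine, not taken from the driver.

/* Overflow-safe bounds check (illustration only): compare the untrusted
 * size against the room that is actually left, never add it to a cursor. */
#include <stdint.h>
#include <stdio.h>

static int event_fits(const uint8_t *cursor, const uint8_t *limit,
                      size_t header_size, uint32_t event_size)
{
        /* overflow-prone form: cursor + header_size + event_size >= limit
         * can wrap for a large event_size and slip past the test. */
        if ((size_t)(limit - cursor) < header_size)
                return 0;                       /* not even room for the header */
        return event_size < (size_t)(limit - cursor) - header_size;
}

int main(void)
{
        uint8_t log[256];

        /* a bogus ~4 GiB size must be rejected */
        printf("huge event accepted?  %d\n",
               event_fits(log, log + sizeof(log), 32, 0xfffffff0u));
        printf("small event accepted? %d\n",
               event_fits(log, log + sizeof(log), 32, 64));
        return 0;
}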
27047 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm.c linux-2.6.32.41/drivers/char/tpm/tpm.c
27048 --- linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27049 +++ linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27050 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27051 chip->vendor.req_complete_val)
27052 goto out_recv;
27053
27054 - if ((status == chip->vendor.req_canceled)) {
27055 + if (status == chip->vendor.req_canceled) {
27056 dev_err(chip->dev, "Operation Canceled\n");
27057 rc = -ECANCELED;
27058 goto out;
27059 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27060
27061 struct tpm_chip *chip = dev_get_drvdata(dev);
27062
27063 + pax_track_stack();
27064 +
27065 tpm_cmd.header.in = tpm_readpubek_header;
27066 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27067 "attempting to read the PUBEK");
27068 diff -urNp linux-2.6.32.41/drivers/char/tty_io.c linux-2.6.32.41/drivers/char/tty_io.c
27069 --- linux-2.6.32.41/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27070 +++ linux-2.6.32.41/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27071 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27072 DEFINE_MUTEX(tty_mutex);
27073 EXPORT_SYMBOL(tty_mutex);
27074
27075 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27076 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27077 ssize_t redirected_tty_write(struct file *, const char __user *,
27078 size_t, loff_t *);
27079 -static unsigned int tty_poll(struct file *, poll_table *);
27080 static int tty_open(struct inode *, struct file *);
27081 -static int tty_release(struct inode *, struct file *);
27082 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27083 -#ifdef CONFIG_COMPAT
27084 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27085 - unsigned long arg);
27086 -#else
27087 -#define tty_compat_ioctl NULL
27088 -#endif
27089 -static int tty_fasync(int fd, struct file *filp, int on);
27090 static void release_tty(struct tty_struct *tty, int idx);
27091 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27092 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27093 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27094 * read calls may be outstanding in parallel.
27095 */
27096
27097 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27098 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27099 loff_t *ppos)
27100 {
27101 int i;
27102 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27103 return i;
27104 }
27105
27106 +EXPORT_SYMBOL(tty_read);
27107 +
27108 void tty_write_unlock(struct tty_struct *tty)
27109 {
27110 mutex_unlock(&tty->atomic_write_lock);
27111 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27112 * write method will not be invoked in parallel for each device.
27113 */
27114
27115 -static ssize_t tty_write(struct file *file, const char __user *buf,
27116 +ssize_t tty_write(struct file *file, const char __user *buf,
27117 size_t count, loff_t *ppos)
27118 {
27119 struct tty_struct *tty;
27120 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27121 return ret;
27122 }
27123
27124 +EXPORT_SYMBOL(tty_write);
27125 +
27126 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27127 size_t count, loff_t *ppos)
27128 {
27129 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27130 * Takes bkl. See tty_release_dev
27131 */
27132
27133 -static int tty_release(struct inode *inode, struct file *filp)
27134 +int tty_release(struct inode *inode, struct file *filp)
27135 {
27136 lock_kernel();
27137 tty_release_dev(filp);
27138 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27139 return 0;
27140 }
27141
27142 +EXPORT_SYMBOL(tty_release);
27143 +
27144 /**
27145 * tty_poll - check tty status
27146 * @filp: file being polled
27147 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27148 * may be re-entered freely by other callers.
27149 */
27150
27151 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27152 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27153 {
27154 struct tty_struct *tty;
27155 struct tty_ldisc *ld;
27156 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27157 return ret;
27158 }
27159
27160 -static int tty_fasync(int fd, struct file *filp, int on)
27161 +EXPORT_SYMBOL(tty_poll);
27162 +
27163 +int tty_fasync(int fd, struct file *filp, int on)
27164 {
27165 struct tty_struct *tty;
27166 unsigned long flags;
27167 @@ -1948,6 +1945,8 @@ out:
27168 return retval;
27169 }
27170
27171 +EXPORT_SYMBOL(tty_fasync);
27172 +
27173 /**
27174 * tiocsti - fake input character
27175 * @tty: tty to fake input into
27176 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27177 return retval;
27178 }
27179
27180 +EXPORT_SYMBOL(tty_ioctl);
27181 +
27182 #ifdef CONFIG_COMPAT
27183 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27184 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27185 unsigned long arg)
27186 {
27187 struct inode *inode = file->f_dentry->d_inode;
27188 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27189
27190 return retval;
27191 }
27192 +
27193 +EXPORT_SYMBOL(tty_compat_ioctl);
27194 #endif
27195
27196 /*
27197 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27198 }
27199 EXPORT_SYMBOL_GPL(get_current_tty);
27200
27201 -void tty_default_fops(struct file_operations *fops)
27202 -{
27203 - *fops = tty_fops;
27204 -}
27205 -
27206 /*
27207 * Initialize the console device. This is called *early*, so
27208 * we can't necessarily depend on lots of kernel help here.
27209 diff -urNp linux-2.6.32.41/drivers/char/tty_ldisc.c linux-2.6.32.41/drivers/char/tty_ldisc.c
27210 --- linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27211 +++ linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27212 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27213 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27214 struct tty_ldisc_ops *ldo = ld->ops;
27215
27216 - ldo->refcount--;
27217 + atomic_dec(&ldo->refcount);
27218 module_put(ldo->owner);
27219 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27220
27221 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27222 spin_lock_irqsave(&tty_ldisc_lock, flags);
27223 tty_ldiscs[disc] = new_ldisc;
27224 new_ldisc->num = disc;
27225 - new_ldisc->refcount = 0;
27226 + atomic_set(&new_ldisc->refcount, 0);
27227 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27228
27229 return ret;
27230 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27231 return -EINVAL;
27232
27233 spin_lock_irqsave(&tty_ldisc_lock, flags);
27234 - if (tty_ldiscs[disc]->refcount)
27235 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27236 ret = -EBUSY;
27237 else
27238 tty_ldiscs[disc] = NULL;
27239 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27240 if (ldops) {
27241 ret = ERR_PTR(-EAGAIN);
27242 if (try_module_get(ldops->owner)) {
27243 - ldops->refcount++;
27244 + atomic_inc(&ldops->refcount);
27245 ret = ldops;
27246 }
27247 }
27248 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27249 unsigned long flags;
27250
27251 spin_lock_irqsave(&tty_ldisc_lock, flags);
27252 - ldops->refcount--;
27253 + atomic_dec(&ldops->refcount);
27254 module_put(ldops->owner);
27255 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27256 }
27257 diff -urNp linux-2.6.32.41/drivers/char/virtio_console.c linux-2.6.32.41/drivers/char/virtio_console.c
27258 --- linux-2.6.32.41/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27259 +++ linux-2.6.32.41/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27260 @@ -44,6 +44,7 @@ static unsigned int in_len;
27261 static char *in, *inbuf;
27262
27263 /* The operations for our console. */
27264 +/* cannot be const */
27265 static struct hv_ops virtio_cons;
27266
27267 /* The hvc device */
27268 diff -urNp linux-2.6.32.41/drivers/char/vt.c linux-2.6.32.41/drivers/char/vt.c
27269 --- linux-2.6.32.41/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27270 +++ linux-2.6.32.41/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27271 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27272
27273 static void notify_write(struct vc_data *vc, unsigned int unicode)
27274 {
27275 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27276 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27277 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27278 }
27279
27280 diff -urNp linux-2.6.32.41/drivers/char/vt_ioctl.c linux-2.6.32.41/drivers/char/vt_ioctl.c
27281 --- linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27282 +++ linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27283 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27284 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27285 return -EFAULT;
27286
27287 - if (!capable(CAP_SYS_TTY_CONFIG))
27288 - perm = 0;
27289 -
27290 switch (cmd) {
27291 case KDGKBENT:
27292 key_map = key_maps[s];
27293 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27294 val = (i ? K_HOLE : K_NOSUCHMAP);
27295 return put_user(val, &user_kbe->kb_value);
27296 case KDSKBENT:
27297 + if (!capable(CAP_SYS_TTY_CONFIG))
27298 + perm = 0;
27299 +
27300 if (!perm)
27301 return -EPERM;
27302 +
27303 if (!i && v == K_NOSUCHMAP) {
27304 /* deallocate map */
27305 key_map = key_maps[s];
27306 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27307 int i, j, k;
27308 int ret;
27309
27310 - if (!capable(CAP_SYS_TTY_CONFIG))
27311 - perm = 0;
27312 -
27313 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27314 if (!kbs) {
27315 ret = -ENOMEM;
27316 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27317 kfree(kbs);
27318 return ((p && *p) ? -EOVERFLOW : 0);
27319 case KDSKBSENT:
27320 + if (!capable(CAP_SYS_TTY_CONFIG))
27321 + perm = 0;
27322 +
27323 if (!perm) {
27324 ret = -EPERM;
27325 goto reterr;
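Note (not part of the patch): the two hunks above move the CAP_SYS_TTY_CONFIG test out of the common path and into the KDSKBENT/KDSKBSENT branches, so reading a key-map entry no longer requires the capability while setting one still does. The sketch below shows the shape of gating only the state-changing case of a command switch; the names and values are hypothetical.

/* Privilege check applied only on the paths that modify state
 * (illustration only). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_cmd { DEMO_GET_ENTRY, DEMO_SET_ENTRY };

static int demo_keymap_entry = 42;

static int demo_ioctl(enum demo_cmd cmd, int value, bool privileged)
{
        switch (cmd) {
        case DEMO_GET_ENTRY:
                return demo_keymap_entry;       /* reading needs no privilege */
        case DEMO_SET_ENTRY:
                if (!privileged)                /* writes are still gated */
                        return -EPERM;
                demo_keymap_entry = value;
                return 0;
        }
        return -EINVAL;
}

int main(void)
{
        printf("get as unprivileged: %d\n", demo_ioctl(DEMO_GET_ENTRY, 0, false));
        printf("set as unprivileged: %d\n", demo_ioctl(DEMO_SET_ENTRY, 7, false));
        printf("set as privileged:   %d\n", demo_ioctl(DEMO_SET_ENTRY, 7, true));
        return 0;
}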
27326 diff -urNp linux-2.6.32.41/drivers/cpufreq/cpufreq.c linux-2.6.32.41/drivers/cpufreq/cpufreq.c
27327 --- linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-03-27 14:31:47.000000000 -0400
27328 +++ linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-04-17 15:56:46.000000000 -0400
27329 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27330 complete(&policy->kobj_unregister);
27331 }
27332
27333 -static struct sysfs_ops sysfs_ops = {
27334 +static const struct sysfs_ops sysfs_ops = {
27335 .show = show,
27336 .store = store,
27337 };
27338 diff -urNp linux-2.6.32.41/drivers/cpuidle/sysfs.c linux-2.6.32.41/drivers/cpuidle/sysfs.c
27339 --- linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27340 +++ linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27341 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27342 return ret;
27343 }
27344
27345 -static struct sysfs_ops cpuidle_sysfs_ops = {
27346 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27347 .show = cpuidle_show,
27348 .store = cpuidle_store,
27349 };
27350 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27351 return ret;
27352 }
27353
27354 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27355 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27356 .show = cpuidle_state_show,
27357 };
27358
27359 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27360 .release = cpuidle_state_sysfs_release,
27361 };
27362
27363 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27364 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27365 {
27366 kobject_put(&device->kobjs[i]->kobj);
27367 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27368 diff -urNp linux-2.6.32.41/drivers/crypto/hifn_795x.c linux-2.6.32.41/drivers/crypto/hifn_795x.c
27369 --- linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27370 +++ linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27371 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27372 0xCA, 0x34, 0x2B, 0x2E};
27373 struct scatterlist sg;
27374
27375 + pax_track_stack();
27376 +
27377 memset(src, 0, sizeof(src));
27378 memset(ctx.key, 0, sizeof(ctx.key));
27379
27380 diff -urNp linux-2.6.32.41/drivers/crypto/padlock-aes.c linux-2.6.32.41/drivers/crypto/padlock-aes.c
27381 --- linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27382 +++ linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27383 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27384 struct crypto_aes_ctx gen_aes;
27385 int cpu;
27386
27387 + pax_track_stack();
27388 +
27389 if (key_len % 8) {
27390 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27391 return -EINVAL;
27392 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.c linux-2.6.32.41/drivers/dma/ioat/dma.c
27393 --- linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27394 +++ linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27395 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27396 return entry->show(&chan->common, page);
27397 }
27398
27399 -struct sysfs_ops ioat_sysfs_ops = {
27400 +const struct sysfs_ops ioat_sysfs_ops = {
27401 .show = ioat_attr_show,
27402 };
27403
27404 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.h linux-2.6.32.41/drivers/dma/ioat/dma.h
27405 --- linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27406 +++ linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27407 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27408 unsigned long *phys_complete);
27409 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27410 void ioat_kobject_del(struct ioatdma_device *device);
27411 -extern struct sysfs_ops ioat_sysfs_ops;
27412 +extern const struct sysfs_ops ioat_sysfs_ops;
27413 extern struct ioat_sysfs_entry ioat_version_attr;
27414 extern struct ioat_sysfs_entry ioat_cap_attr;
27415 #endif /* IOATDMA_H */
27416 diff -urNp linux-2.6.32.41/drivers/edac/edac_device_sysfs.c linux-2.6.32.41/drivers/edac/edac_device_sysfs.c
27417 --- linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27418 +++ linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27419 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27420 }
27421
27422 /* edac_dev file operations for an 'ctl_info' */
27423 -static struct sysfs_ops device_ctl_info_ops = {
27424 +static const struct sysfs_ops device_ctl_info_ops = {
27425 .show = edac_dev_ctl_info_show,
27426 .store = edac_dev_ctl_info_store
27427 };
27428 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27429 }
27430
27431 /* edac_dev file operations for an 'instance' */
27432 -static struct sysfs_ops device_instance_ops = {
27433 +static const struct sysfs_ops device_instance_ops = {
27434 .show = edac_dev_instance_show,
27435 .store = edac_dev_instance_store
27436 };
27437 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27438 }
27439
27440 /* edac_dev file operations for a 'block' */
27441 -static struct sysfs_ops device_block_ops = {
27442 +static const struct sysfs_ops device_block_ops = {
27443 .show = edac_dev_block_show,
27444 .store = edac_dev_block_store
27445 };
27446 diff -urNp linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c
27447 --- linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27448 +++ linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27449 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27450 return -EIO;
27451 }
27452
27453 -static struct sysfs_ops csrowfs_ops = {
27454 +static const struct sysfs_ops csrowfs_ops = {
27455 .show = csrowdev_show,
27456 .store = csrowdev_store
27457 };
27458 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27459 }
27460
27461 /* Intermediate show/store table */
27462 -static struct sysfs_ops mci_ops = {
27463 +static const struct sysfs_ops mci_ops = {
27464 .show = mcidev_show,
27465 .store = mcidev_store
27466 };
27467 diff -urNp linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c
27468 --- linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27469 +++ linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27470 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27471 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27472 static int edac_pci_poll_msec = 1000; /* one second workq period */
27473
27474 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27475 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27476 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27477 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27478
27479 static struct kobject *edac_pci_top_main_kobj;
27480 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27481 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27482 }
27483
27484 /* fs_ops table */
27485 -static struct sysfs_ops pci_instance_ops = {
27486 +static const struct sysfs_ops pci_instance_ops = {
27487 .show = edac_pci_instance_show,
27488 .store = edac_pci_instance_store
27489 };
27490 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27491 return -EIO;
27492 }
27493
27494 -static struct sysfs_ops edac_pci_sysfs_ops = {
27495 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27496 .show = edac_pci_dev_show,
27497 .store = edac_pci_dev_store
27498 };
27499 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27500 edac_printk(KERN_CRIT, EDAC_PCI,
27501 "Signaled System Error on %s\n",
27502 pci_name(dev));
27503 - atomic_inc(&pci_nonparity_count);
27504 + atomic_inc_unchecked(&pci_nonparity_count);
27505 }
27506
27507 if (status & (PCI_STATUS_PARITY)) {
27508 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27509 "Master Data Parity Error on %s\n",
27510 pci_name(dev));
27511
27512 - atomic_inc(&pci_parity_count);
27513 + atomic_inc_unchecked(&pci_parity_count);
27514 }
27515
27516 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27517 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27518 "Detected Parity Error on %s\n",
27519 pci_name(dev));
27520
27521 - atomic_inc(&pci_parity_count);
27522 + atomic_inc_unchecked(&pci_parity_count);
27523 }
27524 }
27525
27526 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27527 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27528 "Signaled System Error on %s\n",
27529 pci_name(dev));
27530 - atomic_inc(&pci_nonparity_count);
27531 + atomic_inc_unchecked(&pci_nonparity_count);
27532 }
27533
27534 if (status & (PCI_STATUS_PARITY)) {
27535 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27536 "Master Data Parity Error on "
27537 "%s\n", pci_name(dev));
27538
27539 - atomic_inc(&pci_parity_count);
27540 + atomic_inc_unchecked(&pci_parity_count);
27541 }
27542
27543 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27544 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27545 "Detected Parity Error on %s\n",
27546 pci_name(dev));
27547
27548 - atomic_inc(&pci_parity_count);
27549 + atomic_inc_unchecked(&pci_parity_count);
27550 }
27551 }
27552 }
27553 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27554 if (!check_pci_errors)
27555 return;
27556
27557 - before_count = atomic_read(&pci_parity_count);
27558 + before_count = atomic_read_unchecked(&pci_parity_count);
27559
27560 /* scan all PCI devices looking for a Parity Error on devices and
27561 * bridges.
27562 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27563 /* Only if operator has selected panic on PCI Error */
27564 if (edac_pci_get_panic_on_pe()) {
27565 /* If the count is different 'after' from 'before' */
27566 - if (before_count != atomic_read(&pci_parity_count))
27567 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27568 panic("EDAC: PCI Parity Error");
27569 }
27570 }
27571 diff -urNp linux-2.6.32.41/drivers/firewire/core-cdev.c linux-2.6.32.41/drivers/firewire/core-cdev.c
27572 --- linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27573 +++ linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27574 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27575 int ret;
27576
27577 if ((request->channels == 0 && request->bandwidth == 0) ||
27578 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27579 - request->bandwidth < 0)
27580 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27581 return -EINVAL;
27582
27583 r = kmalloc(sizeof(*r), GFP_KERNEL);
27584 diff -urNp linux-2.6.32.41/drivers/firewire/core-transaction.c linux-2.6.32.41/drivers/firewire/core-transaction.c
27585 --- linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27586 +++ linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27587 @@ -36,6 +36,7 @@
27588 #include <linux/string.h>
27589 #include <linux/timer.h>
27590 #include <linux/types.h>
27591 +#include <linux/sched.h>
27592
27593 #include <asm/byteorder.h>
27594
27595 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27596 struct transaction_callback_data d;
27597 struct fw_transaction t;
27598
27599 + pax_track_stack();
27600 +
27601 init_completion(&d.done);
27602 d.payload = payload;
27603 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27604 diff -urNp linux-2.6.32.41/drivers/firmware/dmi_scan.c linux-2.6.32.41/drivers/firmware/dmi_scan.c
27605 --- linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27606 +++ linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27607 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27608 }
27609 }
27610 else {
27611 - /*
27612 - * no iounmap() for that ioremap(); it would be a no-op, but
27613 - * it's so early in setup that sucker gets confused into doing
27614 - * what it shouldn't if we actually call it.
27615 - */
27616 p = dmi_ioremap(0xF0000, 0x10000);
27617 if (p == NULL)
27618 goto error;
27619 diff -urNp linux-2.6.32.41/drivers/firmware/edd.c linux-2.6.32.41/drivers/firmware/edd.c
27620 --- linux-2.6.32.41/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27621 +++ linux-2.6.32.41/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27622 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27623 return ret;
27624 }
27625
27626 -static struct sysfs_ops edd_attr_ops = {
27627 +static const struct sysfs_ops edd_attr_ops = {
27628 .show = edd_attr_show,
27629 };
27630
27631 diff -urNp linux-2.6.32.41/drivers/firmware/efivars.c linux-2.6.32.41/drivers/firmware/efivars.c
27632 --- linux-2.6.32.41/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27633 +++ linux-2.6.32.41/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27634 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27635 return ret;
27636 }
27637
27638 -static struct sysfs_ops efivar_attr_ops = {
27639 +static const struct sysfs_ops efivar_attr_ops = {
27640 .show = efivar_attr_show,
27641 .store = efivar_attr_store,
27642 };
27643 diff -urNp linux-2.6.32.41/drivers/firmware/iscsi_ibft.c linux-2.6.32.41/drivers/firmware/iscsi_ibft.c
27644 --- linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27645 +++ linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27646 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27647 return ret;
27648 }
27649
27650 -static struct sysfs_ops ibft_attr_ops = {
27651 +static const struct sysfs_ops ibft_attr_ops = {
27652 .show = ibft_show_attribute,
27653 };
27654
27655 diff -urNp linux-2.6.32.41/drivers/firmware/memmap.c linux-2.6.32.41/drivers/firmware/memmap.c
27656 --- linux-2.6.32.41/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27657 +++ linux-2.6.32.41/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27658 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27659 NULL
27660 };
27661
27662 -static struct sysfs_ops memmap_attr_ops = {
27663 +static const struct sysfs_ops memmap_attr_ops = {
27664 .show = memmap_attr_show,
27665 };
27666
27667 diff -urNp linux-2.6.32.41/drivers/gpio/vr41xx_giu.c linux-2.6.32.41/drivers/gpio/vr41xx_giu.c
27668 --- linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27669 +++ linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27670 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27671 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27672 maskl, pendl, maskh, pendh);
27673
27674 - atomic_inc(&irq_err_count);
27675 + atomic_inc_unchecked(&irq_err_count);
27676
27677 return -EINVAL;
27678 }
27679 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c
27680 --- linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27681 +++ linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27682 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27683 struct drm_crtc *tmp;
27684 int crtc_mask = 1;
27685
27686 - WARN(!crtc, "checking null crtc?");
27687 + BUG_ON(!crtc);
27688
27689 dev = crtc->dev;
27690
27691 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27692
27693 adjusted_mode = drm_mode_duplicate(dev, mode);
27694
27695 + pax_track_stack();
27696 +
27697 crtc->enabled = drm_helper_crtc_in_use(crtc);
27698
27699 if (!crtc->enabled)
27700 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_drv.c linux-2.6.32.41/drivers/gpu/drm/drm_drv.c
27701 --- linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27702 +++ linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27703 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27704 char *kdata = NULL;
27705
27706 atomic_inc(&dev->ioctl_count);
27707 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27708 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27709 ++file_priv->ioctl_count;
27710
27711 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27712 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_fops.c linux-2.6.32.41/drivers/gpu/drm/drm_fops.c
27713 --- linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27714 +++ linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27715 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27716 }
27717
27718 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27719 - atomic_set(&dev->counts[i], 0);
27720 + atomic_set_unchecked(&dev->counts[i], 0);
27721
27722 dev->sigdata.lock = NULL;
27723
27724 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27725
27726 retcode = drm_open_helper(inode, filp, dev);
27727 if (!retcode) {
27728 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27729 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27730 spin_lock(&dev->count_lock);
27731 - if (!dev->open_count++) {
27732 + if (local_inc_return(&dev->open_count) == 1) {
27733 spin_unlock(&dev->count_lock);
27734 retcode = drm_setup(dev);
27735 goto out;
27736 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27737
27738 lock_kernel();
27739
27740 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27741 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27742
27743 if (dev->driver->preclose)
27744 dev->driver->preclose(dev, file_priv);
27745 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27746 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27747 task_pid_nr(current),
27748 (long)old_encode_dev(file_priv->minor->device),
27749 - dev->open_count);
27750 + local_read(&dev->open_count));
27751
27752 /* if the master has gone away we can't do anything with the lock */
27753 if (file_priv->minor->master)
27754 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27755 * End inline drm_release
27756 */
27757
27758 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27759 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27760 spin_lock(&dev->count_lock);
27761 - if (!--dev->open_count) {
27762 + if (local_dec_and_test(&dev->open_count)) {
27763 if (atomic_read(&dev->ioctl_count)) {
27764 DRM_ERROR("Device busy: %d\n",
27765 atomic_read(&dev->ioctl_count));
27766 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_gem.c linux-2.6.32.41/drivers/gpu/drm/drm_gem.c
27767 --- linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27768 +++ linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27769 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27770 spin_lock_init(&dev->object_name_lock);
27771 idr_init(&dev->object_name_idr);
27772 atomic_set(&dev->object_count, 0);
27773 - atomic_set(&dev->object_memory, 0);
27774 + atomic_set_unchecked(&dev->object_memory, 0);
27775 atomic_set(&dev->pin_count, 0);
27776 - atomic_set(&dev->pin_memory, 0);
27777 + atomic_set_unchecked(&dev->pin_memory, 0);
27778 atomic_set(&dev->gtt_count, 0);
27779 - atomic_set(&dev->gtt_memory, 0);
27780 + atomic_set_unchecked(&dev->gtt_memory, 0);
27781
27782 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27783 if (!mm) {
27784 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27785 goto fput;
27786 }
27787 atomic_inc(&dev->object_count);
27788 - atomic_add(obj->size, &dev->object_memory);
27789 + atomic_add_unchecked(obj->size, &dev->object_memory);
27790 return obj;
27791 fput:
27792 fput(obj->filp);
27793 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27794
27795 fput(obj->filp);
27796 atomic_dec(&dev->object_count);
27797 - atomic_sub(obj->size, &dev->object_memory);
27798 + atomic_sub_unchecked(obj->size, &dev->object_memory);
27799 kfree(obj);
27800 }
27801 EXPORT_SYMBOL(drm_gem_object_free);
27802 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_info.c linux-2.6.32.41/drivers/gpu/drm/drm_info.c
27803 --- linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27804 +++ linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27805 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27806 struct drm_local_map *map;
27807 struct drm_map_list *r_list;
27808
27809 - /* Hardcoded from _DRM_FRAME_BUFFER,
27810 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27811 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27812 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27813 + static const char * const types[] = {
27814 + [_DRM_FRAME_BUFFER] = "FB",
27815 + [_DRM_REGISTERS] = "REG",
27816 + [_DRM_SHM] = "SHM",
27817 + [_DRM_AGP] = "AGP",
27818 + [_DRM_SCATTER_GATHER] = "SG",
27819 + [_DRM_CONSISTENT] = "PCI",
27820 + [_DRM_GEM] = "GEM" };
27821 const char *type;
27822 int i;
27823
27824 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27825 map = r_list->map;
27826 if (!map)
27827 continue;
27828 - if (map->type < 0 || map->type > 5)
27829 + if (map->type >= ARRAY_SIZE(types))
27830 type = "??";
27831 else
27832 type = types[map->type];
27833 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27834 struct drm_device *dev = node->minor->dev;
27835
27836 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27837 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27838 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27839 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27840 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27841 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27842 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27843 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27844 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27845 return 0;
27846 }
27847 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27848 mutex_lock(&dev->struct_mutex);
27849 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27850 atomic_read(&dev->vma_count),
27851 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27852 + NULL, 0);
27853 +#else
27854 high_memory, (u64)virt_to_phys(high_memory));
27855 +#endif
27856
27857 list_for_each_entry(pt, &dev->vmalist, head) {
27858 vma = pt->vma;
27859 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27860 continue;
27861 seq_printf(m,
27862 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27863 - pt->pid, vma->vm_start, vma->vm_end,
27864 + pt->pid,
27865 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27866 + 0, 0,
27867 +#else
27868 + vma->vm_start, vma->vm_end,
27869 +#endif
27870 vma->vm_flags & VM_READ ? 'r' : '-',
27871 vma->vm_flags & VM_WRITE ? 'w' : '-',
27872 vma->vm_flags & VM_EXEC ? 'x' : '-',
27873 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27874 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27875 vma->vm_flags & VM_IO ? 'i' : '-',
27876 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27877 + 0);
27878 +#else
27879 vma->vm_pgoff);
27880 +#endif
27881
27882 #if defined(__i386__)
27883 pgprot = pgprot_val(vma->vm_page_prot);
27884 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c
27885 --- linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27886 +++ linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27887 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27888 stats->data[i].value =
27889 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27890 else
27891 - stats->data[i].value = atomic_read(&dev->counts[i]);
27892 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27893 stats->data[i].type = dev->types[i];
27894 }
27895
27896 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_lock.c linux-2.6.32.41/drivers/gpu/drm/drm_lock.c
27897 --- linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
27898 +++ linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
27899 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
27900 if (drm_lock_take(&master->lock, lock->context)) {
27901 master->lock.file_priv = file_priv;
27902 master->lock.lock_time = jiffies;
27903 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27904 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27905 break; /* Got lock */
27906 }
27907
27908 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
27909 return -EINVAL;
27910 }
27911
27912 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27913 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27914
27915 /* kernel_context_switch isn't used by any of the x86 drm
27916 * modules but is required by the Sparc driver.
27917 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c
27918 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
27919 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
27920 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
27921 dma->buflist[vertex->idx],
27922 vertex->discard, vertex->used);
27923
27924 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27925 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27926 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27927 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27928 sarea_priv->last_enqueue = dev_priv->counter - 1;
27929 sarea_priv->last_dispatch = (int)hw_status[5];
27930
27931 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
27932 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
27933 mc->last_render);
27934
27935 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27936 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27937 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27938 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27939 sarea_priv->last_enqueue = dev_priv->counter - 1;
27940 sarea_priv->last_dispatch = (int)hw_status[5];
27941
27942 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h
27943 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
27944 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
27945 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
27946 int page_flipping;
27947
27948 wait_queue_head_t irq_queue;
27949 - atomic_t irq_received;
27950 - atomic_t irq_emitted;
27951 + atomic_unchecked_t irq_received;
27952 + atomic_unchecked_t irq_emitted;
27953
27954 int front_offset;
27955 } drm_i810_private_t;
27956 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h
27957 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
27958 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
27959 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
27960 int page_flipping;
27961
27962 wait_queue_head_t irq_queue;
27963 - atomic_t irq_received;
27964 - atomic_t irq_emitted;
27965 + atomic_unchecked_t irq_received;
27966 + atomic_unchecked_t irq_emitted;
27967
27968 int use_mi_batchbuffer_start;
27969
27970 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c
27971 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
27972 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
27973 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
27974
27975 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
27976
27977 - atomic_inc(&dev_priv->irq_received);
27978 + atomic_inc_unchecked(&dev_priv->irq_received);
27979 wake_up_interruptible(&dev_priv->irq_queue);
27980
27981 return IRQ_HANDLED;
27982 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
27983
27984 DRM_DEBUG("%s\n", __func__);
27985
27986 - atomic_inc(&dev_priv->irq_emitted);
27987 + atomic_inc_unchecked(&dev_priv->irq_emitted);
27988
27989 BEGIN_LP_RING(2);
27990 OUT_RING(0);
27991 OUT_RING(GFX_OP_USER_INTERRUPT);
27992 ADVANCE_LP_RING();
27993
27994 - return atomic_read(&dev_priv->irq_emitted);
27995 + return atomic_read_unchecked(&dev_priv->irq_emitted);
27996 }
27997
27998 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
27999 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28000
28001 DRM_DEBUG("%s\n", __func__);
28002
28003 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28004 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28005 return 0;
28006
28007 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28008 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28009
28010 for (;;) {
28011 __set_current_state(TASK_INTERRUPTIBLE);
28012 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28013 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28014 break;
28015 if ((signed)(end - jiffies) <= 0) {
28016 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28017 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28018 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28019 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28020 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28021 - atomic_set(&dev_priv->irq_received, 0);
28022 - atomic_set(&dev_priv->irq_emitted, 0);
28023 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28024 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28025 init_waitqueue_head(&dev_priv->irq_queue);
28026 }
28027
28028 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c
28029 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28030 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28031 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28032 }
28033 }
28034
28035 -struct intel_dvo_dev_ops ch7017_ops = {
28036 +const struct intel_dvo_dev_ops ch7017_ops = {
28037 .init = ch7017_init,
28038 .detect = ch7017_detect,
28039 .mode_valid = ch7017_mode_valid,
28040 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c
28041 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28042 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28043 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28044 }
28045 }
28046
28047 -struct intel_dvo_dev_ops ch7xxx_ops = {
28048 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28049 .init = ch7xxx_init,
28050 .detect = ch7xxx_detect,
28051 .mode_valid = ch7xxx_mode_valid,
28052 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h
28053 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28054 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28055 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28056 *
28057 * \return singly-linked list of modes or NULL if no modes found.
28058 */
28059 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28060 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28061
28062 /**
28063 * Clean up driver-specific bits of the output
28064 */
28065 - void (*destroy) (struct intel_dvo_device *dvo);
28066 + void (* const destroy) (struct intel_dvo_device *dvo);
28067
28068 /**
28069 * Debugging hook to dump device registers to log file
28070 */
28071 - void (*dump_regs)(struct intel_dvo_device *dvo);
28072 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28073 };
28074
28075 -extern struct intel_dvo_dev_ops sil164_ops;
28076 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28077 -extern struct intel_dvo_dev_ops ivch_ops;
28078 -extern struct intel_dvo_dev_ops tfp410_ops;
28079 -extern struct intel_dvo_dev_ops ch7017_ops;
28080 +extern const struct intel_dvo_dev_ops sil164_ops;
28081 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28082 +extern const struct intel_dvo_dev_ops ivch_ops;
28083 +extern const struct intel_dvo_dev_ops tfp410_ops;
28084 +extern const struct intel_dvo_dev_ops ch7017_ops;
28085
28086 #endif /* _INTEL_DVO_H */
28087 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c
28088 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28089 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28090 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28091 }
28092 }
28093
28094 -struct intel_dvo_dev_ops ivch_ops= {
28095 +const struct intel_dvo_dev_ops ivch_ops= {
28096 .init = ivch_init,
28097 .dpms = ivch_dpms,
28098 .save = ivch_save,
28099 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c
28100 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28101 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28102 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28103 }
28104 }
28105
28106 -struct intel_dvo_dev_ops sil164_ops = {
28107 +const struct intel_dvo_dev_ops sil164_ops = {
28108 .init = sil164_init,
28109 .detect = sil164_detect,
28110 .mode_valid = sil164_mode_valid,
28111 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c
28112 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28113 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28114 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28115 }
28116 }
28117
28118 -struct intel_dvo_dev_ops tfp410_ops = {
28119 +const struct intel_dvo_dev_ops tfp410_ops = {
28120 .init = tfp410_init,
28121 .detect = tfp410_detect,
28122 .mode_valid = tfp410_mode_valid,
28123 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c
28124 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28125 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28126 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28127 I915_READ(GTIMR));
28128 }
28129 seq_printf(m, "Interrupts received: %d\n",
28130 - atomic_read(&dev_priv->irq_received));
28131 + atomic_read_unchecked(&dev_priv->irq_received));
28132 if (dev_priv->hw_status_page != NULL) {
28133 seq_printf(m, "Current sequence: %d\n",
28134 i915_get_gem_seqno(dev));
28135 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c
28136 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28137 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28138 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28139 return i915_resume(dev);
28140 }
28141
28142 -static struct vm_operations_struct i915_gem_vm_ops = {
28143 +static const struct vm_operations_struct i915_gem_vm_ops = {
28144 .fault = i915_gem_fault,
28145 .open = drm_gem_vm_open,
28146 .close = drm_gem_vm_close,
28147 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h
28148 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28149 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28150 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28151 int page_flipping;
28152
28153 wait_queue_head_t irq_queue;
28154 - atomic_t irq_received;
28155 + atomic_unchecked_t irq_received;
28156 /** Protects user_irq_refcount and irq_mask_reg */
28157 spinlock_t user_irq_lock;
28158 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28159 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c
28160 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28161 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28162 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28163
28164 args->aper_size = dev->gtt_total;
28165 args->aper_available_size = (args->aper_size -
28166 - atomic_read(&dev->pin_memory));
28167 + atomic_read_unchecked(&dev->pin_memory));
28168
28169 return 0;
28170 }
28171 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28172 return -EINVAL;
28173 }
28174
28175 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28176 + drm_gem_object_unreference(obj);
28177 + return -EFAULT;
28178 + }
28179 +
28180 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28181 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28182 } else {
28183 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28184 return -EINVAL;
28185 }
28186
28187 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28188 + drm_gem_object_unreference(obj);
28189 + return -EFAULT;
28190 + }
28191 +
28192 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28193 * it would end up going through the fenced access, and we'll get
28194 * different detiling behavior between reading and writing.
28195 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28196
28197 if (obj_priv->gtt_space) {
28198 atomic_dec(&dev->gtt_count);
28199 - atomic_sub(obj->size, &dev->gtt_memory);
28200 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28201
28202 drm_mm_put_block(obj_priv->gtt_space);
28203 obj_priv->gtt_space = NULL;
28204 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28205 goto search_free;
28206 }
28207 atomic_inc(&dev->gtt_count);
28208 - atomic_add(obj->size, &dev->gtt_memory);
28209 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28210
28211 /* Assert that the object is not currently in any GPU domain. As it
28212 * wasn't in the GTT, there shouldn't be any way it could have been in
28213 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28214 "%d/%d gtt bytes\n",
28215 atomic_read(&dev->object_count),
28216 atomic_read(&dev->pin_count),
28217 - atomic_read(&dev->object_memory),
28218 - atomic_read(&dev->pin_memory),
28219 - atomic_read(&dev->gtt_memory),
28220 + atomic_read_unchecked(&dev->object_memory),
28221 + atomic_read_unchecked(&dev->pin_memory),
28222 + atomic_read_unchecked(&dev->gtt_memory),
28223 dev->gtt_total);
28224 }
28225 goto err;
28226 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28227 */
28228 if (obj_priv->pin_count == 1) {
28229 atomic_inc(&dev->pin_count);
28230 - atomic_add(obj->size, &dev->pin_memory);
28231 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28232 if (!obj_priv->active &&
28233 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28234 !list_empty(&obj_priv->list))
28235 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28236 list_move_tail(&obj_priv->list,
28237 &dev_priv->mm.inactive_list);
28238 atomic_dec(&dev->pin_count);
28239 - atomic_sub(obj->size, &dev->pin_memory);
28240 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28241 }
28242 i915_verify_inactive(dev, __FILE__, __LINE__);
28243 }
28244 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c
28245 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28246 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28247 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28248 int irq_received;
28249 int ret = IRQ_NONE;
28250
28251 - atomic_inc(&dev_priv->irq_received);
28252 + atomic_inc_unchecked(&dev_priv->irq_received);
28253
28254 if (IS_IGDNG(dev))
28255 return igdng_irq_handler(dev);
28256 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28257 {
28258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28259
28260 - atomic_set(&dev_priv->irq_received, 0);
28261 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28262
28263 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28264 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28265 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h
28266 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28267 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28268 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28269 u32 clear_cmd;
28270 u32 maccess;
28271
28272 - atomic_t vbl_received; /**< Number of vblanks received. */
28273 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28274 wait_queue_head_t fence_queue;
28275 - atomic_t last_fence_retired;
28276 + atomic_unchecked_t last_fence_retired;
28277 u32 next_fence_to_post;
28278
28279 unsigned int fb_cpp;
28280 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c
28281 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28282 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28283 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28284 if (crtc != 0)
28285 return 0;
28286
28287 - return atomic_read(&dev_priv->vbl_received);
28288 + return atomic_read_unchecked(&dev_priv->vbl_received);
28289 }
28290
28291
28292 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28293 /* VBLANK interrupt */
28294 if (status & MGA_VLINEPEN) {
28295 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28296 - atomic_inc(&dev_priv->vbl_received);
28297 + atomic_inc_unchecked(&dev_priv->vbl_received);
28298 drm_handle_vblank(dev, 0);
28299 handled = 1;
28300 }
28301 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28302 MGA_WRITE(MGA_PRIMEND, prim_end);
28303 }
28304
28305 - atomic_inc(&dev_priv->last_fence_retired);
28306 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28307 DRM_WAKEUP(&dev_priv->fence_queue);
28308 handled = 1;
28309 }
28310 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28311 * using fences.
28312 */
28313 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28314 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28315 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28316 - *sequence) <= (1 << 23)));
28317
28318 *sequence = cur_fence;
28319 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c
28320 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28321 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28322 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28323
28324 /* GH: Simple idle check.
28325 */
28326 - atomic_set(&dev_priv->idle_count, 0);
28327 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28328
28329 /* We don't support anything other than bus-mastering ring mode,
28330 * but the ring can be in either AGP or PCI space for the ring
28331 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h
28332 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28333 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28334 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28335 int is_pci;
28336 unsigned long cce_buffers_offset;
28337
28338 - atomic_t idle_count;
28339 + atomic_unchecked_t idle_count;
28340
28341 int page_flipping;
28342 int current_page;
28343 u32 crtc_offset;
28344 u32 crtc_offset_cntl;
28345
28346 - atomic_t vbl_received;
28347 + atomic_unchecked_t vbl_received;
28348
28349 u32 color_fmt;
28350 unsigned int front_offset;
28351 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c
28352 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28353 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28354 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28355 if (crtc != 0)
28356 return 0;
28357
28358 - return atomic_read(&dev_priv->vbl_received);
28359 + return atomic_read_unchecked(&dev_priv->vbl_received);
28360 }
28361
28362 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28363 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28364 /* VBLANK interrupt */
28365 if (status & R128_CRTC_VBLANK_INT) {
28366 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28367 - atomic_inc(&dev_priv->vbl_received);
28368 + atomic_inc_unchecked(&dev_priv->vbl_received);
28369 drm_handle_vblank(dev, 0);
28370 return IRQ_HANDLED;
28371 }
28372 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c
28373 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28374 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28375 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28376
28377 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28378 {
28379 - if (atomic_read(&dev_priv->idle_count) == 0) {
28380 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28381 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28382 } else {
28383 - atomic_set(&dev_priv->idle_count, 0);
28384 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28385 }
28386 }
28387
28388 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c
28389 --- linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28390 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28391 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28392 char name[512];
28393 int i;
28394
28395 + pax_track_stack();
28396 +
28397 ctx->card = card;
28398 ctx->bios = bios;
28399
28400 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c
28401 --- linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28402 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28403 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28404 regex_t mask_rex;
28405 regmatch_t match[4];
28406 char buf[1024];
28407 - size_t end;
28408 + long end;
28409 int len;
28410 int done = 0;
28411 int r;
28412 unsigned o;
28413 struct offset *offset;
28414 char last_reg_s[10];
28415 - int last_reg;
28416 + unsigned long last_reg;
28417
28418 if (regcomp
28419 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28420 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c
28421 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28422 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28423 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28424 bool linkb;
28425 struct radeon_i2c_bus_rec ddc_bus;
28426
28427 + pax_track_stack();
28428 +
28429 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28430
28431 if (data_offset == 0)
28432 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28433 }
28434 }
28435
28436 -struct bios_connector {
28437 +static struct bios_connector {
28438 bool valid;
28439 uint16_t line_mux;
28440 uint16_t devices;
28441 int connector_type;
28442 struct radeon_i2c_bus_rec ddc_bus;
28443 -};
28444 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28445
28446 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28447 drm_device
28448 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28449 uint8_t dac;
28450 union atom_supported_devices *supported_devices;
28451 int i, j;
28452 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28453
28454 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28455
28456 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c
28457 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28458 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28459 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28460
28461 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28462 error = freq - current_freq;
28463 - error = error < 0 ? 0xffffffff : error;
28464 + error = (int32_t)error < 0 ? 0xffffffff : error;
28465 } else
28466 error = abs(current_freq - freq);
28467 vco_diff = abs(vco - best_vco);
28468 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h
28469 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28470 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28471 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28472
28473 /* SW interrupt */
28474 wait_queue_head_t swi_queue;
28475 - atomic_t swi_emitted;
28476 + atomic_unchecked_t swi_emitted;
28477 int vblank_crtc;
28478 uint32_t irq_enable_reg;
28479 uint32_t r500_disp_irq_reg;
28480 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c
28481 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28482 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28483 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28484 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28485 return 0;
28486 }
28487 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28488 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28489 if (!rdev->cp.ready) {
28490 /* FIXME: cp is not running assume everythings is done right
28491 * away
28492 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28493 return r;
28494 }
28495 WREG32(rdev->fence_drv.scratch_reg, 0);
28496 - atomic_set(&rdev->fence_drv.seq, 0);
28497 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28498 INIT_LIST_HEAD(&rdev->fence_drv.created);
28499 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28500 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28501 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h
28502 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28503 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28504 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28505 */
28506 struct radeon_fence_driver {
28507 uint32_t scratch_reg;
28508 - atomic_t seq;
28509 + atomic_unchecked_t seq;
28510 uint32_t last_seq;
28511 unsigned long count_timeout;
28512 wait_queue_head_t queue;
28513 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c
28514 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28515 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28516 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28517 request = compat_alloc_user_space(sizeof(*request));
28518 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28519 || __put_user(req32.param, &request->param)
28520 - || __put_user((void __user *)(unsigned long)req32.value,
28521 + || __put_user((unsigned long)req32.value,
28522 &request->value))
28523 return -EFAULT;
28524
28525 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c
28526 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28527 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28528 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28529 unsigned int ret;
28530 RING_LOCALS;
28531
28532 - atomic_inc(&dev_priv->swi_emitted);
28533 - ret = atomic_read(&dev_priv->swi_emitted);
28534 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28535 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28536
28537 BEGIN_RING(4);
28538 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28539 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28540 drm_radeon_private_t *dev_priv =
28541 (drm_radeon_private_t *) dev->dev_private;
28542
28543 - atomic_set(&dev_priv->swi_emitted, 0);
28544 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28545 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28546
28547 dev->max_vblank_count = 0x001fffff;
28548 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c
28549 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28550 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28551 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28552 {
28553 drm_radeon_private_t *dev_priv = dev->dev_private;
28554 drm_radeon_getparam_t *param = data;
28555 - int value;
28556 + int value = 0;
28557
28558 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28559
28560 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c
28561 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28562 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28563 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28564 DRM_INFO("radeon: ttm finalized\n");
28565 }
28566
28567 -static struct vm_operations_struct radeon_ttm_vm_ops;
28568 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28569 -
28570 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28571 -{
28572 - struct ttm_buffer_object *bo;
28573 - int r;
28574 -
28575 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28576 - if (bo == NULL) {
28577 - return VM_FAULT_NOPAGE;
28578 - }
28579 - r = ttm_vm_ops->fault(vma, vmf);
28580 - return r;
28581 -}
28582 -
28583 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28584 {
28585 struct drm_file *file_priv;
28586 struct radeon_device *rdev;
28587 - int r;
28588
28589 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28590 return drm_mmap(filp, vma);
28591 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28592
28593 file_priv = (struct drm_file *)filp->private_data;
28594 rdev = file_priv->minor->dev->dev_private;
28595 - if (rdev == NULL) {
28596 + if (!rdev)
28597 return -EINVAL;
28598 - }
28599 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28600 - if (unlikely(r != 0)) {
28601 - return r;
28602 - }
28603 - if (unlikely(ttm_vm_ops == NULL)) {
28604 - ttm_vm_ops = vma->vm_ops;
28605 - radeon_ttm_vm_ops = *ttm_vm_ops;
28606 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28607 - }
28608 - vma->vm_ops = &radeon_ttm_vm_ops;
28609 - return 0;
28610 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28611 }
28612
28613
28614 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c
28615 --- linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28616 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28617 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28618 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28619 rdev->pm.sideport_bandwidth.full)
28620 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28621 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28622 + read_delay_latency.full = rfixed_const(800 * 1000);
28623 read_delay_latency.full = rfixed_div(read_delay_latency,
28624 rdev->pm.igp_sideport_mclk);
28625 + a.full = rfixed_const(370);
28626 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28627 } else {
28628 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28629 rdev->pm.k8_bandwidth.full)
28630 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c
28631 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28632 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28633 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28634 NULL
28635 };
28636
28637 -static struct sysfs_ops ttm_bo_global_ops = {
28638 +static const struct sysfs_ops ttm_bo_global_ops = {
28639 .show = &ttm_bo_global_show
28640 };
28641
28642 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c
28643 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28644 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28645 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28646 {
28647 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28648 vma->vm_private_data;
28649 - struct ttm_bo_device *bdev = bo->bdev;
28650 + struct ttm_bo_device *bdev;
28651 unsigned long bus_base;
28652 unsigned long bus_offset;
28653 unsigned long bus_size;
28654 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28655 unsigned long address = (unsigned long)vmf->virtual_address;
28656 int retval = VM_FAULT_NOPAGE;
28657
28658 + if (!bo)
28659 + return VM_FAULT_NOPAGE;
28660 + bdev = bo->bdev;
28661 +
28662 /*
28663 * Work around locking order reversal in fault / nopfn
28664 * between mmap_sem and bo_reserve: Perform a trylock operation
28665 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c
28666 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28667 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28668 @@ -36,7 +36,7 @@
28669 struct ttm_global_item {
28670 struct mutex mutex;
28671 void *object;
28672 - int refcount;
28673 + atomic_t refcount;
28674 };
28675
28676 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28677 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28678 struct ttm_global_item *item = &glob[i];
28679 mutex_init(&item->mutex);
28680 item->object = NULL;
28681 - item->refcount = 0;
28682 + atomic_set(&item->refcount, 0);
28683 }
28684 }
28685
28686 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28687 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28688 struct ttm_global_item *item = &glob[i];
28689 BUG_ON(item->object != NULL);
28690 - BUG_ON(item->refcount != 0);
28691 + BUG_ON(atomic_read(&item->refcount) != 0);
28692 }
28693 }
28694
28695 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28696 void *object;
28697
28698 mutex_lock(&item->mutex);
28699 - if (item->refcount == 0) {
28700 + if (atomic_read(&item->refcount) == 0) {
28701 item->object = kzalloc(ref->size, GFP_KERNEL);
28702 if (unlikely(item->object == NULL)) {
28703 ret = -ENOMEM;
28704 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28705 goto out_err;
28706
28707 }
28708 - ++item->refcount;
28709 + atomic_inc(&item->refcount);
28710 ref->object = item->object;
28711 object = item->object;
28712 mutex_unlock(&item->mutex);
28713 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28714 struct ttm_global_item *item = &glob[ref->global_type];
28715
28716 mutex_lock(&item->mutex);
28717 - BUG_ON(item->refcount == 0);
28718 + BUG_ON(atomic_read(&item->refcount) == 0);
28719 BUG_ON(ref->object != item->object);
28720 - if (--item->refcount == 0) {
28721 + if (atomic_dec_and_test(&item->refcount)) {
28722 ref->release(ref);
28723 item->object = NULL;
28724 }
28725 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c
28726 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28727 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28728 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28729 NULL
28730 };
28731
28732 -static struct sysfs_ops ttm_mem_zone_ops = {
28733 +static const struct sysfs_ops ttm_mem_zone_ops = {
28734 .show = &ttm_mem_zone_show,
28735 .store = &ttm_mem_zone_store
28736 };
28737 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h
28738 --- linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28739 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28740 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28741 typedef uint32_t maskarray_t[5];
28742
28743 typedef struct drm_via_irq {
28744 - atomic_t irq_received;
28745 + atomic_unchecked_t irq_received;
28746 uint32_t pending_mask;
28747 uint32_t enable_mask;
28748 wait_queue_head_t irq_queue;
28749 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28750 struct timeval last_vblank;
28751 int last_vblank_valid;
28752 unsigned usec_per_vblank;
28753 - atomic_t vbl_received;
28754 + atomic_unchecked_t vbl_received;
28755 drm_via_state_t hc_state;
28756 char pci_buf[VIA_PCI_BUF_SIZE];
28757 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28758 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c
28759 --- linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28760 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28761 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28762 if (crtc != 0)
28763 return 0;
28764
28765 - return atomic_read(&dev_priv->vbl_received);
28766 + return atomic_read_unchecked(&dev_priv->vbl_received);
28767 }
28768
28769 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28770 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28771
28772 status = VIA_READ(VIA_REG_INTERRUPT);
28773 if (status & VIA_IRQ_VBLANK_PENDING) {
28774 - atomic_inc(&dev_priv->vbl_received);
28775 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28776 + atomic_inc_unchecked(&dev_priv->vbl_received);
28777 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28778 do_gettimeofday(&cur_vblank);
28779 if (dev_priv->last_vblank_valid) {
28780 dev_priv->usec_per_vblank =
28781 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28782 dev_priv->last_vblank = cur_vblank;
28783 dev_priv->last_vblank_valid = 1;
28784 }
28785 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28786 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28787 DRM_DEBUG("US per vblank is: %u\n",
28788 dev_priv->usec_per_vblank);
28789 }
28790 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28791
28792 for (i = 0; i < dev_priv->num_irqs; ++i) {
28793 if (status & cur_irq->pending_mask) {
28794 - atomic_inc(&cur_irq->irq_received);
28795 + atomic_inc_unchecked(&cur_irq->irq_received);
28796 DRM_WAKEUP(&cur_irq->irq_queue);
28797 handled = 1;
28798 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28799 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28800 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28801 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28802 masks[irq][4]));
28803 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28804 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28805 } else {
28806 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28807 (((cur_irq_sequence =
28808 - atomic_read(&cur_irq->irq_received)) -
28809 + atomic_read_unchecked(&cur_irq->irq_received)) -
28810 *sequence) <= (1 << 23)));
28811 }
28812 *sequence = cur_irq_sequence;
28813 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28814 }
28815
28816 for (i = 0; i < dev_priv->num_irqs; ++i) {
28817 - atomic_set(&cur_irq->irq_received, 0);
28818 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28819 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28820 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28821 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28822 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28823 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28824 case VIA_IRQ_RELATIVE:
28825 irqwait->request.sequence +=
28826 - atomic_read(&cur_irq->irq_received);
28827 + atomic_read_unchecked(&cur_irq->irq_received);
28828 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28829 case VIA_IRQ_ABSOLUTE:
28830 break;
28831 diff -urNp linux-2.6.32.41/drivers/hid/hid-core.c linux-2.6.32.41/drivers/hid/hid-core.c
28832 --- linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28833 +++ linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28834 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28835
28836 int hid_add_device(struct hid_device *hdev)
28837 {
28838 - static atomic_t id = ATOMIC_INIT(0);
28839 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28840 int ret;
28841
28842 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28843 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28844 /* XXX hack, any other cleaner solution after the driver core
28845 * is converted to allow more than 20 bytes as the device name? */
28846 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28847 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28848 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28849
28850 ret = device_add(&hdev->dev);
28851 if (!ret)
28852 diff -urNp linux-2.6.32.41/drivers/hid/usbhid/hiddev.c linux-2.6.32.41/drivers/hid/usbhid/hiddev.c
28853 --- linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28854 +++ linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28855 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28856 return put_user(HID_VERSION, (int __user *)arg);
28857
28858 case HIDIOCAPPLICATION:
28859 - if (arg < 0 || arg >= hid->maxapplication)
28860 + if (arg >= hid->maxapplication)
28861 return -EINVAL;
28862
28863 for (i = 0; i < hid->maxcollection; i++)
28864 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.c linux-2.6.32.41/drivers/hwmon/lis3lv02d.c
28865 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28866 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28867 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28868 * the lid is closed. This leads to interrupts as soon as a little move
28869 * is done.
28870 */
28871 - atomic_inc(&lis3_dev.count);
28872 + atomic_inc_unchecked(&lis3_dev.count);
28873
28874 wake_up_interruptible(&lis3_dev.misc_wait);
28875 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28876 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28877 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28878 return -EBUSY; /* already open */
28879
28880 - atomic_set(&lis3_dev.count, 0);
28881 + atomic_set_unchecked(&lis3_dev.count, 0);
28882
28883 /*
28884 * The sensor can generate interrupts for free-fall and direction
28885 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28886 add_wait_queue(&lis3_dev.misc_wait, &wait);
28887 while (true) {
28888 set_current_state(TASK_INTERRUPTIBLE);
28889 - data = atomic_xchg(&lis3_dev.count, 0);
28890 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28891 if (data)
28892 break;
28893
28894 @@ -244,7 +244,7 @@ out:
28895 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28896 {
28897 poll_wait(file, &lis3_dev.misc_wait, wait);
28898 - if (atomic_read(&lis3_dev.count))
28899 + if (atomic_read_unchecked(&lis3_dev.count))
28900 return POLLIN | POLLRDNORM;
28901 return 0;
28902 }
28903 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.h linux-2.6.32.41/drivers/hwmon/lis3lv02d.h
28904 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
28905 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
28906 @@ -201,7 +201,7 @@ struct lis3lv02d {
28907
28908 struct input_polled_dev *idev; /* input device */
28909 struct platform_device *pdev; /* platform device */
28910 - atomic_t count; /* interrupt count after last read */
28911 + atomic_unchecked_t count; /* interrupt count after last read */
28912 int xcalib; /* calibrated null value for x */
28913 int ycalib; /* calibrated null value for y */
28914 int zcalib; /* calibrated null value for z */
28915 diff -urNp linux-2.6.32.41/drivers/hwmon/sht15.c linux-2.6.32.41/drivers/hwmon/sht15.c
28916 --- linux-2.6.32.41/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
28917 +++ linux-2.6.32.41/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
28918 @@ -112,7 +112,7 @@ struct sht15_data {
28919 int supply_uV;
28920 int supply_uV_valid;
28921 struct work_struct update_supply_work;
28922 - atomic_t interrupt_handled;
28923 + atomic_unchecked_t interrupt_handled;
28924 };
28925
28926 /**
28927 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
28928 return ret;
28929
28930 gpio_direction_input(data->pdata->gpio_data);
28931 - atomic_set(&data->interrupt_handled, 0);
28932 + atomic_set_unchecked(&data->interrupt_handled, 0);
28933
28934 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28935 if (gpio_get_value(data->pdata->gpio_data) == 0) {
28936 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
28937 /* Only relevant if the interrupt hasn't occured. */
28938 - if (!atomic_read(&data->interrupt_handled))
28939 + if (!atomic_read_unchecked(&data->interrupt_handled))
28940 schedule_work(&data->read_work);
28941 }
28942 ret = wait_event_timeout(data->wait_queue,
28943 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
28944 struct sht15_data *data = d;
28945 /* First disable the interrupt */
28946 disable_irq_nosync(irq);
28947 - atomic_inc(&data->interrupt_handled);
28948 + atomic_inc_unchecked(&data->interrupt_handled);
28949 /* Then schedule a reading work struct */
28950 if (data->flag != SHT15_READING_NOTHING)
28951 schedule_work(&data->read_work);
28952 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
28953 here as could have gone low in meantime so verify
28954 it hasn't!
28955 */
28956 - atomic_set(&data->interrupt_handled, 0);
28957 + atomic_set_unchecked(&data->interrupt_handled, 0);
28958 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28959 /* If still not occured or another handler has been scheduled */
28960 if (gpio_get_value(data->pdata->gpio_data)
28961 - || atomic_read(&data->interrupt_handled))
28962 + || atomic_read_unchecked(&data->interrupt_handled))
28963 return;
28964 }
28965 /* Read the data back from the device */
28966 diff -urNp linux-2.6.32.41/drivers/hwmon/w83791d.c linux-2.6.32.41/drivers/hwmon/w83791d.c
28967 --- linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
28968 +++ linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
28969 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
28970 struct i2c_board_info *info);
28971 static int w83791d_remove(struct i2c_client *client);
28972
28973 -static int w83791d_read(struct i2c_client *client, u8 register);
28974 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
28975 +static int w83791d_read(struct i2c_client *client, u8 reg);
28976 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
28977 static struct w83791d_data *w83791d_update_device(struct device *dev);
28978
28979 #ifdef DEBUG
28980 diff -urNp linux-2.6.32.41/drivers/ide/ide-cd.c linux-2.6.32.41/drivers/ide/ide-cd.c
28981 --- linux-2.6.32.41/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
28982 +++ linux-2.6.32.41/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
28983 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
28984 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
28985 if ((unsigned long)buf & alignment
28986 || blk_rq_bytes(rq) & q->dma_pad_mask
28987 - || object_is_on_stack(buf))
28988 + || object_starts_on_stack(buf))
28989 drive->dma = 0;
28990 }
28991 }
28992 diff -urNp linux-2.6.32.41/drivers/ide/ide-floppy.c linux-2.6.32.41/drivers/ide/ide-floppy.c
28993 --- linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
28994 +++ linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
28995 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
28996 u8 pc_buf[256], header_len, desc_cnt;
28997 int i, rc = 1, blocks, length;
28998
28999 + pax_track_stack();
29000 +
29001 ide_debug_log(IDE_DBG_FUNC, "enter");
29002
29003 drive->bios_cyl = 0;
29004 diff -urNp linux-2.6.32.41/drivers/ide/setup-pci.c linux-2.6.32.41/drivers/ide/setup-pci.c
29005 --- linux-2.6.32.41/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29006 +++ linux-2.6.32.41/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29007 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29008 int ret, i, n_ports = dev2 ? 4 : 2;
29009 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29010
29011 + pax_track_stack();
29012 +
29013 for (i = 0; i < n_ports / 2; i++) {
29014 ret = ide_setup_pci_controller(pdev[i], d, !i);
29015 if (ret < 0)
29016 diff -urNp linux-2.6.32.41/drivers/ieee1394/dv1394.c linux-2.6.32.41/drivers/ieee1394/dv1394.c
29017 --- linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29018 +++ linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29019 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29020 based upon DIF section and sequence
29021 */
29022
29023 -static void inline
29024 +static inline void
29025 frame_put_packet (struct frame *f, struct packet *p)
29026 {
29027 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29028 diff -urNp linux-2.6.32.41/drivers/ieee1394/hosts.c linux-2.6.32.41/drivers/ieee1394/hosts.c
29029 --- linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29030 +++ linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29031 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29032 }
29033
29034 static struct hpsb_host_driver dummy_driver = {
29035 + .name = "dummy",
29036 .transmit_packet = dummy_transmit_packet,
29037 .devctl = dummy_devctl,
29038 .isoctl = dummy_isoctl
29039 diff -urNp linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c
29040 --- linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29041 +++ linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29042 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29043 for (func = 0; func < 8; func++) {
29044 u32 class = read_pci_config(num,slot,func,
29045 PCI_CLASS_REVISION);
29046 - if ((class == 0xffffffff))
29047 + if (class == 0xffffffff)
29048 continue; /* No device at this func */
29049
29050 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29051 diff -urNp linux-2.6.32.41/drivers/ieee1394/ohci1394.c linux-2.6.32.41/drivers/ieee1394/ohci1394.c
29052 --- linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29053 +++ linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29054 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29055 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29056
29057 /* Module Parameters */
29058 -static int phys_dma = 1;
29059 +static int phys_dma;
29060 module_param(phys_dma, int, 0444);
29061 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29062 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29063
29064 static void dma_trm_tasklet(unsigned long data);
29065 static void dma_trm_reset(struct dma_trm_ctx *d);
29066 diff -urNp linux-2.6.32.41/drivers/ieee1394/sbp2.c linux-2.6.32.41/drivers/ieee1394/sbp2.c
29067 --- linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29068 +++ linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29069 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29070 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29071 MODULE_LICENSE("GPL");
29072
29073 -static int sbp2_module_init(void)
29074 +static int __init sbp2_module_init(void)
29075 {
29076 int ret;
29077
29078 diff -urNp linux-2.6.32.41/drivers/infiniband/core/cm.c linux-2.6.32.41/drivers/infiniband/core/cm.c
29079 --- linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29080 +++ linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29081 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29082
29083 struct cm_counter_group {
29084 struct kobject obj;
29085 - atomic_long_t counter[CM_ATTR_COUNT];
29086 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29087 };
29088
29089 struct cm_counter_attribute {
29090 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29091 struct ib_mad_send_buf *msg = NULL;
29092 int ret;
29093
29094 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29095 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29096 counter[CM_REQ_COUNTER]);
29097
29098 /* Quick state check to discard duplicate REQs. */
29099 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29100 if (!cm_id_priv)
29101 return;
29102
29103 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29104 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29105 counter[CM_REP_COUNTER]);
29106 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29107 if (ret)
29108 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29109 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29110 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29111 spin_unlock_irq(&cm_id_priv->lock);
29112 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29113 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29114 counter[CM_RTU_COUNTER]);
29115 goto out;
29116 }
29117 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29118 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29119 dreq_msg->local_comm_id);
29120 if (!cm_id_priv) {
29121 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29122 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29123 counter[CM_DREQ_COUNTER]);
29124 cm_issue_drep(work->port, work->mad_recv_wc);
29125 return -EINVAL;
29126 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29127 case IB_CM_MRA_REP_RCVD:
29128 break;
29129 case IB_CM_TIMEWAIT:
29130 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29131 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29132 counter[CM_DREQ_COUNTER]);
29133 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29134 goto unlock;
29135 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29136 cm_free_msg(msg);
29137 goto deref;
29138 case IB_CM_DREQ_RCVD:
29139 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29140 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29141 counter[CM_DREQ_COUNTER]);
29142 goto unlock;
29143 default:
29144 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29145 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29146 cm_id_priv->msg, timeout)) {
29147 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29148 - atomic_long_inc(&work->port->
29149 + atomic_long_inc_unchecked(&work->port->
29150 counter_group[CM_RECV_DUPLICATES].
29151 counter[CM_MRA_COUNTER]);
29152 goto out;
29153 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29154 break;
29155 case IB_CM_MRA_REQ_RCVD:
29156 case IB_CM_MRA_REP_RCVD:
29157 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29158 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29159 counter[CM_MRA_COUNTER]);
29160 /* fall through */
29161 default:
29162 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29163 case IB_CM_LAP_IDLE:
29164 break;
29165 case IB_CM_MRA_LAP_SENT:
29166 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29167 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29168 counter[CM_LAP_COUNTER]);
29169 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29170 goto unlock;
29171 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29172 cm_free_msg(msg);
29173 goto deref;
29174 case IB_CM_LAP_RCVD:
29175 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29176 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29177 counter[CM_LAP_COUNTER]);
29178 goto unlock;
29179 default:
29180 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29181 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29182 if (cur_cm_id_priv) {
29183 spin_unlock_irq(&cm.lock);
29184 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29185 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29186 counter[CM_SIDR_REQ_COUNTER]);
29187 goto out; /* Duplicate message. */
29188 }
29189 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29190 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29191 msg->retries = 1;
29192
29193 - atomic_long_add(1 + msg->retries,
29194 + atomic_long_add_unchecked(1 + msg->retries,
29195 &port->counter_group[CM_XMIT].counter[attr_index]);
29196 if (msg->retries)
29197 - atomic_long_add(msg->retries,
29198 + atomic_long_add_unchecked(msg->retries,
29199 &port->counter_group[CM_XMIT_RETRIES].
29200 counter[attr_index]);
29201
29202 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29203 }
29204
29205 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29206 - atomic_long_inc(&port->counter_group[CM_RECV].
29207 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29208 counter[attr_id - CM_ATTR_ID_OFFSET]);
29209
29210 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29211 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29212 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29213
29214 return sprintf(buf, "%ld\n",
29215 - atomic_long_read(&group->counter[cm_attr->index]));
29216 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29217 }
29218
29219 -static struct sysfs_ops cm_counter_ops = {
29220 +static const struct sysfs_ops cm_counter_ops = {
29221 .show = cm_show_counter
29222 };
29223
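
The cm.c hunks above show a pattern that repeats throughout this patch (fmr_pool.c, the nes driver and the device-mapper changes below follow it too): statistics counters that are allowed to wrap are moved from atomic_t/atomic_long_t to the *_unchecked variants introduced by the PaX side of this patch, evidently so that the atomic overflow detection only instruments counters whose wrap would be a real bug, such as reference counts. A minimal sketch of the unchecked-counter idiom, using a hypothetical rx_dropped counter and the helpers defined elsewhere in this patch:

    /* wrap-tolerant statistics counter: no overflow detection wanted here */
    static atomic_unchecked_t rx_dropped = ATOMIC_INIT(0);

    static void note_drop(void)
    {
            atomic_inc_unchecked(&rx_dropped);
    }

    static long dropped_so_far(void)
    {
            return atomic_read_unchecked(&rx_dropped);
    }
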
29224 diff -urNp linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c
29225 --- linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29226 +++ linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29227 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29228
29229 struct task_struct *thread;
29230
29231 - atomic_t req_ser;
29232 - atomic_t flush_ser;
29233 + atomic_unchecked_t req_ser;
29234 + atomic_unchecked_t flush_ser;
29235
29236 wait_queue_head_t force_wait;
29237 };
29238 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29239 struct ib_fmr_pool *pool = pool_ptr;
29240
29241 do {
29242 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29243 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29244 ib_fmr_batch_release(pool);
29245
29246 - atomic_inc(&pool->flush_ser);
29247 + atomic_inc_unchecked(&pool->flush_ser);
29248 wake_up_interruptible(&pool->force_wait);
29249
29250 if (pool->flush_function)
29251 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29252 }
29253
29254 set_current_state(TASK_INTERRUPTIBLE);
29255 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29256 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29257 !kthread_should_stop())
29258 schedule();
29259 __set_current_state(TASK_RUNNING);
29260 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29261 pool->dirty_watermark = params->dirty_watermark;
29262 pool->dirty_len = 0;
29263 spin_lock_init(&pool->pool_lock);
29264 - atomic_set(&pool->req_ser, 0);
29265 - atomic_set(&pool->flush_ser, 0);
29266 + atomic_set_unchecked(&pool->req_ser, 0);
29267 + atomic_set_unchecked(&pool->flush_ser, 0);
29268 init_waitqueue_head(&pool->force_wait);
29269
29270 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29271 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29272 }
29273 spin_unlock_irq(&pool->pool_lock);
29274
29275 - serial = atomic_inc_return(&pool->req_ser);
29276 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29277 wake_up_process(pool->thread);
29278
29279 if (wait_event_interruptible(pool->force_wait,
29280 - atomic_read(&pool->flush_ser) - serial >= 0))
29281 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29282 return -EINTR;
29283
29284 return 0;
29285 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29286 } else {
29287 list_add_tail(&fmr->list, &pool->dirty_list);
29288 if (++pool->dirty_len >= pool->dirty_watermark) {
29289 - atomic_inc(&pool->req_ser);
29290 + atomic_inc_unchecked(&pool->req_ser);
29291 wake_up_process(pool->thread);
29292 }
29293 }
29294 diff -urNp linux-2.6.32.41/drivers/infiniband/core/sysfs.c linux-2.6.32.41/drivers/infiniband/core/sysfs.c
29295 --- linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29296 +++ linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29297 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29298 return port_attr->show(p, port_attr, buf);
29299 }
29300
29301 -static struct sysfs_ops port_sysfs_ops = {
29302 +static const struct sysfs_ops port_sysfs_ops = {
29303 .show = port_attr_show
29304 };
29305
29306 diff -urNp linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c
29307 --- linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29308 +++ linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29309 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29310 dst->grh.sgid_index = src->grh.sgid_index;
29311 dst->grh.hop_limit = src->grh.hop_limit;
29312 dst->grh.traffic_class = src->grh.traffic_class;
29313 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29314 dst->dlid = src->dlid;
29315 dst->sl = src->sl;
29316 dst->src_path_bits = src->src_path_bits;
29317 dst->static_rate = src->static_rate;
29318 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29319 dst->port_num = src->port_num;
29320 + dst->reserved = 0;
29321 }
29322 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29323
29324 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29325 struct ib_qp_attr *src)
29326 {
29327 + dst->qp_state = src->qp_state;
29328 dst->cur_qp_state = src->cur_qp_state;
29329 dst->path_mtu = src->path_mtu;
29330 dst->path_mig_state = src->path_mig_state;
29331 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29332 dst->rnr_retry = src->rnr_retry;
29333 dst->alt_port_num = src->alt_port_num;
29334 dst->alt_timeout = src->alt_timeout;
29335 + memset(dst->reserved, 0, sizeof(dst->reserved));
29336 }
29337 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29338
29339 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c
29340 --- linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29341 +++ linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29342 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29343 struct infinipath_counters counters;
29344 struct ipath_devdata *dd;
29345
29346 + pax_track_stack();
29347 +
29348 dd = file->f_path.dentry->d_inode->i_private;
29349 dd->ipath_f_read_counters(dd, &counters);
29350
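
The pax_track_stack() call added to atomic_counters_read() above, like the similar calls inserted into the sidewinder, eicon and i4l drivers below, marks a function whose locals (here a whole struct infinipath_counters) make for an unusually deep stack frame; the helper comes from the PaX portion of this patch and, as far as the pattern suggests, lets the stack-sanitizing feature account for how deep the kernel stack actually got. Roughly, the insertion looks like this, with a made-up function and buffer for illustration:

    static ssize_t example_read(struct file *file, char __user *ubuf,
                                size_t count, loff_t *ppos)
    {
            char scratch[1024];     /* hypothetical large on-stack buffer */

            pax_track_stack();      /* provided by this patch; records the deep frame */

            /* ... fill scratch and copy the result to ubuf ... */
            return 0;
    }
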
29351 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c
29352 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29353 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29354 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29355 LIST_HEAD(nes_adapter_list);
29356 static LIST_HEAD(nes_dev_list);
29357
29358 -atomic_t qps_destroyed;
29359 +atomic_unchecked_t qps_destroyed;
29360
29361 static unsigned int ee_flsh_adapter;
29362 static unsigned int sysfs_nonidx_addr;
29363 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29364 struct nes_adapter *nesadapter = nesdev->nesadapter;
29365 u32 qp_id;
29366
29367 - atomic_inc(&qps_destroyed);
29368 + atomic_inc_unchecked(&qps_destroyed);
29369
29370 /* Free the control structures */
29371
29372 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c
29373 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29374 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29375 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29376 u32 cm_listens_created;
29377 u32 cm_listens_destroyed;
29378 u32 cm_backlog_drops;
29379 -atomic_t cm_loopbacks;
29380 -atomic_t cm_nodes_created;
29381 -atomic_t cm_nodes_destroyed;
29382 -atomic_t cm_accel_dropped_pkts;
29383 -atomic_t cm_resets_recvd;
29384 +atomic_unchecked_t cm_loopbacks;
29385 +atomic_unchecked_t cm_nodes_created;
29386 +atomic_unchecked_t cm_nodes_destroyed;
29387 +atomic_unchecked_t cm_accel_dropped_pkts;
29388 +atomic_unchecked_t cm_resets_recvd;
29389
29390 static inline int mini_cm_accelerated(struct nes_cm_core *,
29391 struct nes_cm_node *);
29392 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29393
29394 static struct nes_cm_core *g_cm_core;
29395
29396 -atomic_t cm_connects;
29397 -atomic_t cm_accepts;
29398 -atomic_t cm_disconnects;
29399 -atomic_t cm_closes;
29400 -atomic_t cm_connecteds;
29401 -atomic_t cm_connect_reqs;
29402 -atomic_t cm_rejects;
29403 +atomic_unchecked_t cm_connects;
29404 +atomic_unchecked_t cm_accepts;
29405 +atomic_unchecked_t cm_disconnects;
29406 +atomic_unchecked_t cm_closes;
29407 +atomic_unchecked_t cm_connecteds;
29408 +atomic_unchecked_t cm_connect_reqs;
29409 +atomic_unchecked_t cm_rejects;
29410
29411
29412 /**
29413 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29414 cm_node->rem_mac);
29415
29416 add_hte_node(cm_core, cm_node);
29417 - atomic_inc(&cm_nodes_created);
29418 + atomic_inc_unchecked(&cm_nodes_created);
29419
29420 return cm_node;
29421 }
29422 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29423 }
29424
29425 atomic_dec(&cm_core->node_cnt);
29426 - atomic_inc(&cm_nodes_destroyed);
29427 + atomic_inc_unchecked(&cm_nodes_destroyed);
29428 nesqp = cm_node->nesqp;
29429 if (nesqp) {
29430 nesqp->cm_node = NULL;
29431 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29432
29433 static void drop_packet(struct sk_buff *skb)
29434 {
29435 - atomic_inc(&cm_accel_dropped_pkts);
29436 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29437 dev_kfree_skb_any(skb);
29438 }
29439
29440 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29441
29442 int reset = 0; /* whether to send reset in case of err.. */
29443 int passive_state;
29444 - atomic_inc(&cm_resets_recvd);
29445 + atomic_inc_unchecked(&cm_resets_recvd);
29446 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29447 " refcnt=%d\n", cm_node, cm_node->state,
29448 atomic_read(&cm_node->ref_count));
29449 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29450 rem_ref_cm_node(cm_node->cm_core, cm_node);
29451 return NULL;
29452 }
29453 - atomic_inc(&cm_loopbacks);
29454 + atomic_inc_unchecked(&cm_loopbacks);
29455 loopbackremotenode->loopbackpartner = cm_node;
29456 loopbackremotenode->tcp_cntxt.rcv_wscale =
29457 NES_CM_DEFAULT_RCV_WND_SCALE;
29458 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29459 add_ref_cm_node(cm_node);
29460 } else if (cm_node->state == NES_CM_STATE_TSA) {
29461 rem_ref_cm_node(cm_core, cm_node);
29462 - atomic_inc(&cm_accel_dropped_pkts);
29463 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29464 dev_kfree_skb_any(skb);
29465 break;
29466 }
29467 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29468
29469 if ((cm_id) && (cm_id->event_handler)) {
29470 if (issue_disconn) {
29471 - atomic_inc(&cm_disconnects);
29472 + atomic_inc_unchecked(&cm_disconnects);
29473 cm_event.event = IW_CM_EVENT_DISCONNECT;
29474 cm_event.status = disconn_status;
29475 cm_event.local_addr = cm_id->local_addr;
29476 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29477 }
29478
29479 if (issue_close) {
29480 - atomic_inc(&cm_closes);
29481 + atomic_inc_unchecked(&cm_closes);
29482 nes_disconnect(nesqp, 1);
29483
29484 cm_id->provider_data = nesqp;
29485 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29486
29487 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29488 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29489 - atomic_inc(&cm_accepts);
29490 + atomic_inc_unchecked(&cm_accepts);
29491
29492 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29493 atomic_read(&nesvnic->netdev->refcnt));
29494 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29495
29496 struct nes_cm_core *cm_core;
29497
29498 - atomic_inc(&cm_rejects);
29499 + atomic_inc_unchecked(&cm_rejects);
29500 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29501 loopback = cm_node->loopbackpartner;
29502 cm_core = cm_node->cm_core;
29503 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29504 ntohl(cm_id->local_addr.sin_addr.s_addr),
29505 ntohs(cm_id->local_addr.sin_port));
29506
29507 - atomic_inc(&cm_connects);
29508 + atomic_inc_unchecked(&cm_connects);
29509 nesqp->active_conn = 1;
29510
29511 /* cache the cm_id in the qp */
29512 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29513 if (nesqp->destroyed) {
29514 return;
29515 }
29516 - atomic_inc(&cm_connecteds);
29517 + atomic_inc_unchecked(&cm_connecteds);
29518 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29519 " local port 0x%04X. jiffies = %lu.\n",
29520 nesqp->hwqp.qp_id,
29521 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29522
29523 ret = cm_id->event_handler(cm_id, &cm_event);
29524 cm_id->add_ref(cm_id);
29525 - atomic_inc(&cm_closes);
29526 + atomic_inc_unchecked(&cm_closes);
29527 cm_event.event = IW_CM_EVENT_CLOSE;
29528 cm_event.status = IW_CM_EVENT_STATUS_OK;
29529 cm_event.provider_data = cm_id->provider_data;
29530 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29531 return;
29532 cm_id = cm_node->cm_id;
29533
29534 - atomic_inc(&cm_connect_reqs);
29535 + atomic_inc_unchecked(&cm_connect_reqs);
29536 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29537 cm_node, cm_id, jiffies);
29538
29539 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29540 return;
29541 cm_id = cm_node->cm_id;
29542
29543 - atomic_inc(&cm_connect_reqs);
29544 + atomic_inc_unchecked(&cm_connect_reqs);
29545 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29546 cm_node, cm_id, jiffies);
29547
29548 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h
29549 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29550 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29551 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29552 extern unsigned int wqm_quanta;
29553 extern struct list_head nes_adapter_list;
29554
29555 -extern atomic_t cm_connects;
29556 -extern atomic_t cm_accepts;
29557 -extern atomic_t cm_disconnects;
29558 -extern atomic_t cm_closes;
29559 -extern atomic_t cm_connecteds;
29560 -extern atomic_t cm_connect_reqs;
29561 -extern atomic_t cm_rejects;
29562 -extern atomic_t mod_qp_timouts;
29563 -extern atomic_t qps_created;
29564 -extern atomic_t qps_destroyed;
29565 -extern atomic_t sw_qps_destroyed;
29566 +extern atomic_unchecked_t cm_connects;
29567 +extern atomic_unchecked_t cm_accepts;
29568 +extern atomic_unchecked_t cm_disconnects;
29569 +extern atomic_unchecked_t cm_closes;
29570 +extern atomic_unchecked_t cm_connecteds;
29571 +extern atomic_unchecked_t cm_connect_reqs;
29572 +extern atomic_unchecked_t cm_rejects;
29573 +extern atomic_unchecked_t mod_qp_timouts;
29574 +extern atomic_unchecked_t qps_created;
29575 +extern atomic_unchecked_t qps_destroyed;
29576 +extern atomic_unchecked_t sw_qps_destroyed;
29577 extern u32 mh_detected;
29578 extern u32 mh_pauses_sent;
29579 extern u32 cm_packets_sent;
29580 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29581 extern u32 cm_listens_created;
29582 extern u32 cm_listens_destroyed;
29583 extern u32 cm_backlog_drops;
29584 -extern atomic_t cm_loopbacks;
29585 -extern atomic_t cm_nodes_created;
29586 -extern atomic_t cm_nodes_destroyed;
29587 -extern atomic_t cm_accel_dropped_pkts;
29588 -extern atomic_t cm_resets_recvd;
29589 +extern atomic_unchecked_t cm_loopbacks;
29590 +extern atomic_unchecked_t cm_nodes_created;
29591 +extern atomic_unchecked_t cm_nodes_destroyed;
29592 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29593 +extern atomic_unchecked_t cm_resets_recvd;
29594
29595 extern u32 int_mod_timer_init;
29596 extern u32 int_mod_cq_depth_256;
29597 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c
29598 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29599 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29600 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29601 target_stat_values[++index] = mh_detected;
29602 target_stat_values[++index] = mh_pauses_sent;
29603 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29604 - target_stat_values[++index] = atomic_read(&cm_connects);
29605 - target_stat_values[++index] = atomic_read(&cm_accepts);
29606 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29607 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29608 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29609 - target_stat_values[++index] = atomic_read(&cm_rejects);
29610 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29611 - target_stat_values[++index] = atomic_read(&qps_created);
29612 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29613 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29614 - target_stat_values[++index] = atomic_read(&cm_closes);
29615 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29616 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29617 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29618 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29619 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29620 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29621 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29622 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29623 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29624 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29625 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29626 target_stat_values[++index] = cm_packets_sent;
29627 target_stat_values[++index] = cm_packets_bounced;
29628 target_stat_values[++index] = cm_packets_created;
29629 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29630 target_stat_values[++index] = cm_listens_created;
29631 target_stat_values[++index] = cm_listens_destroyed;
29632 target_stat_values[++index] = cm_backlog_drops;
29633 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29634 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29635 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29636 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29637 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29638 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29639 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29640 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29641 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29642 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29643 target_stat_values[++index] = int_mod_timer_init;
29644 target_stat_values[++index] = int_mod_cq_depth_1;
29645 target_stat_values[++index] = int_mod_cq_depth_4;
29646 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c
29647 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29648 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29649 @@ -45,9 +45,9 @@
29650
29651 #include <rdma/ib_umem.h>
29652
29653 -atomic_t mod_qp_timouts;
29654 -atomic_t qps_created;
29655 -atomic_t sw_qps_destroyed;
29656 +atomic_unchecked_t mod_qp_timouts;
29657 +atomic_unchecked_t qps_created;
29658 +atomic_unchecked_t sw_qps_destroyed;
29659
29660 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29661
29662 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29663 if (init_attr->create_flags)
29664 return ERR_PTR(-EINVAL);
29665
29666 - atomic_inc(&qps_created);
29667 + atomic_inc_unchecked(&qps_created);
29668 switch (init_attr->qp_type) {
29669 case IB_QPT_RC:
29670 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29671 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29672 struct iw_cm_event cm_event;
29673 int ret;
29674
29675 - atomic_inc(&sw_qps_destroyed);
29676 + atomic_inc_unchecked(&sw_qps_destroyed);
29677 nesqp->destroyed = 1;
29678
29679 /* Blow away the connection if it exists. */
29680 diff -urNp linux-2.6.32.41/drivers/input/gameport/gameport.c linux-2.6.32.41/drivers/input/gameport/gameport.c
29681 --- linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29682 +++ linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29683 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29684 */
29685 static void gameport_init_port(struct gameport *gameport)
29686 {
29687 - static atomic_t gameport_no = ATOMIC_INIT(0);
29688 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29689
29690 __module_get(THIS_MODULE);
29691
29692 mutex_init(&gameport->drv_mutex);
29693 device_initialize(&gameport->dev);
29694 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29695 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29696 gameport->dev.bus = &gameport_bus;
29697 gameport->dev.release = gameport_release_port;
29698 if (gameport->parent)
29699 diff -urNp linux-2.6.32.41/drivers/input/input.c linux-2.6.32.41/drivers/input/input.c
29700 --- linux-2.6.32.41/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29701 +++ linux-2.6.32.41/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29702 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29703 */
29704 int input_register_device(struct input_dev *dev)
29705 {
29706 - static atomic_t input_no = ATOMIC_INIT(0);
29707 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29708 struct input_handler *handler;
29709 const char *path;
29710 int error;
29711 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29712 dev->setkeycode = input_default_setkeycode;
29713
29714 dev_set_name(&dev->dev, "input%ld",
29715 - (unsigned long) atomic_inc_return(&input_no) - 1);
29716 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29717
29718 error = device_add(&dev->dev);
29719 if (error)
29720 diff -urNp linux-2.6.32.41/drivers/input/joystick/sidewinder.c linux-2.6.32.41/drivers/input/joystick/sidewinder.c
29721 --- linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29722 +++ linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29723 @@ -30,6 +30,7 @@
29724 #include <linux/kernel.h>
29725 #include <linux/module.h>
29726 #include <linux/slab.h>
29727 +#include <linux/sched.h>
29728 #include <linux/init.h>
29729 #include <linux/input.h>
29730 #include <linux/gameport.h>
29731 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29732 unsigned char buf[SW_LENGTH];
29733 int i;
29734
29735 + pax_track_stack();
29736 +
29737 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29738
29739 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29740 diff -urNp linux-2.6.32.41/drivers/input/joystick/xpad.c linux-2.6.32.41/drivers/input/joystick/xpad.c
29741 --- linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29742 +++ linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29743 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29744
29745 static int xpad_led_probe(struct usb_xpad *xpad)
29746 {
29747 - static atomic_t led_seq = ATOMIC_INIT(0);
29748 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29749 long led_no;
29750 struct xpad_led *led;
29751 struct led_classdev *led_cdev;
29752 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29753 if (!led)
29754 return -ENOMEM;
29755
29756 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29757 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29758
29759 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29760 led->xpad = xpad;
29761 diff -urNp linux-2.6.32.41/drivers/input/serio/serio.c linux-2.6.32.41/drivers/input/serio/serio.c
29762 --- linux-2.6.32.41/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29763 +++ linux-2.6.32.41/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29764 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
29765 */
29766 static void serio_init_port(struct serio *serio)
29767 {
29768 - static atomic_t serio_no = ATOMIC_INIT(0);
29769 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29770
29771 __module_get(THIS_MODULE);
29772
29773 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29774 mutex_init(&serio->drv_mutex);
29775 device_initialize(&serio->dev);
29776 dev_set_name(&serio->dev, "serio%ld",
29777 - (long)atomic_inc_return(&serio_no) - 1);
29778 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
29779 serio->dev.bus = &serio_bus;
29780 serio->dev.release = serio_release_port;
29781 if (serio->parent) {
29782 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/common.c linux-2.6.32.41/drivers/isdn/gigaset/common.c
29783 --- linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29784 +++ linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29785 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29786 cs->commands_pending = 0;
29787 cs->cur_at_seq = 0;
29788 cs->gotfwver = -1;
29789 - cs->open_count = 0;
29790 + local_set(&cs->open_count, 0);
29791 cs->dev = NULL;
29792 cs->tty = NULL;
29793 cs->tty_dev = NULL;
29794 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h
29795 --- linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29796 +++ linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29797 @@ -34,6 +34,7 @@
29798 #include <linux/tty_driver.h>
29799 #include <linux/list.h>
29800 #include <asm/atomic.h>
29801 +#include <asm/local.h>
29802
29803 #define GIG_VERSION {0,5,0,0}
29804 #define GIG_COMPAT {0,4,0,0}
29805 @@ -446,7 +447,7 @@ struct cardstate {
29806 spinlock_t cmdlock;
29807 unsigned curlen, cmdbytes;
29808
29809 - unsigned open_count;
29810 + local_t open_count;
29811 struct tty_struct *tty;
29812 struct tasklet_struct if_wake_tasklet;
29813 unsigned control_state;
29814 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/interface.c linux-2.6.32.41/drivers/isdn/gigaset/interface.c
29815 --- linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29816 +++ linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29817 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29818 return -ERESTARTSYS; // FIXME -EINTR?
29819 tty->driver_data = cs;
29820
29821 - ++cs->open_count;
29822 -
29823 - if (cs->open_count == 1) {
29824 + if (local_inc_return(&cs->open_count) == 1) {
29825 spin_lock_irqsave(&cs->lock, flags);
29826 cs->tty = tty;
29827 spin_unlock_irqrestore(&cs->lock, flags);
29828 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29829
29830 if (!cs->connected)
29831 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29832 - else if (!cs->open_count)
29833 + else if (!local_read(&cs->open_count))
29834 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29835 else {
29836 - if (!--cs->open_count) {
29837 + if (!local_dec_return(&cs->open_count)) {
29838 spin_lock_irqsave(&cs->lock, flags);
29839 cs->tty = NULL;
29840 spin_unlock_irqrestore(&cs->lock, flags);
29841 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29842 if (!cs->connected) {
29843 gig_dbg(DEBUG_IF, "not connected");
29844 retval = -ENODEV;
29845 - } else if (!cs->open_count)
29846 + } else if (!local_read(&cs->open_count))
29847 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29848 else {
29849 retval = 0;
29850 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29851 if (!cs->connected) {
29852 gig_dbg(DEBUG_IF, "not connected");
29853 retval = -ENODEV;
29854 - } else if (!cs->open_count)
29855 + } else if (!local_read(&cs->open_count))
29856 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29857 else if (cs->mstate != MS_LOCKED) {
29858 dev_warn(cs->dev, "can't write to unlocked device\n");
29859 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29860 if (!cs->connected) {
29861 gig_dbg(DEBUG_IF, "not connected");
29862 retval = -ENODEV;
29863 - } else if (!cs->open_count)
29864 + } else if (!local_read(&cs->open_count))
29865 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29866 else if (cs->mstate != MS_LOCKED) {
29867 dev_warn(cs->dev, "can't write to unlocked device\n");
29868 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29869
29870 if (!cs->connected)
29871 gig_dbg(DEBUG_IF, "not connected");
29872 - else if (!cs->open_count)
29873 + else if (!local_read(&cs->open_count))
29874 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29875 else if (cs->mstate != MS_LOCKED)
29876 dev_warn(cs->dev, "can't write to unlocked device\n");
29877 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29878
29879 if (!cs->connected)
29880 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29881 - else if (!cs->open_count)
29882 + else if (!local_read(&cs->open_count))
29883 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29884 else {
29885 //FIXME
29886 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29887
29888 if (!cs->connected)
29889 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29890 - else if (!cs->open_count)
29891 + else if (!local_read(&cs->open_count))
29892 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29893 else {
29894 //FIXME
29895 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
29896 goto out;
29897 }
29898
29899 - if (!cs->open_count) {
29900 + if (!local_read(&cs->open_count)) {
29901 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29902 goto out;
29903 }
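
The gigaset changes above turn cardstate.open_count from a plain unsigned into a local_t and collapse the separate increment-then-test and decrement-then-test sequences into local_inc_return()/local_dec_return(), with local_read() serving the many "is it open at all" checks. The open/close pairing, sketched on a stripped-down structure:

    #include <asm/local.h>

    struct example_state {
            local_t open_count;
    };

    static int example_open(struct example_state *st)
    {
            if (local_inc_return(&st->open_count) == 1) {
                    /* first opener: do the one-time setup */
            }
            return 0;
    }

    static void example_close(struct example_state *st)
    {
            if (!local_dec_return(&st->open_count)) {
                    /* last closer: tear everything down */
            }
    }
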
29904 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c
29905 --- linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
29906 +++ linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
29907 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
29908 }
29909 if (left) {
29910 if (t4file->user) {
29911 - if (copy_from_user(buf, dp, left))
29912 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29913 return -EFAULT;
29914 } else {
29915 memcpy(buf, dp, left);
29916 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
29917 }
29918 if (left) {
29919 if (config->user) {
29920 - if (copy_from_user(buf, dp, left))
29921 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29922 return -EFAULT;
29923 } else {
29924 memcpy(buf, dp, left);
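
Both b1.c hunks bound the userspace copy by the size of the on-stack buffer before calling copy_from_user(); icn_writecmd() in icn.c further down gets the identical treatment. The shape of the check, on a hypothetical bounce buffer:

    /* sketch: refuse chunks larger than the local buffer before copying */
    static int pull_chunk(const char __user *src, size_t left)
    {
            char buf[256];          /* hypothetical fixed-size bounce buffer */

            if (left > sizeof(buf) || copy_from_user(buf, src, left))
                    return -EFAULT;
            /* ... hand buf off to the hardware ... */
            return 0;
    }
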
29925 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c
29926 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
29927 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
29928 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29929 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29930 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29931
29932 + pax_track_stack();
29933
29934 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29935 {
29936 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c
29937 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
29938 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
29939 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29940 IDI_SYNC_REQ req;
29941 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29942
29943 + pax_track_stack();
29944 +
29945 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29946
29947 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29948 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c
29949 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
29950 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
29951 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29952 IDI_SYNC_REQ req;
29953 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29954
29955 + pax_track_stack();
29956 +
29957 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29958
29959 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29960 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c
29961 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
29962 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
29963 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
29964 IDI_SYNC_REQ req;
29965 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29966
29967 + pax_track_stack();
29968 +
29969 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29970
29971 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29972 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c
29973 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
29974 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
29975 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29976 IDI_SYNC_REQ req;
29977 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29978
29979 + pax_track_stack();
29980 +
29981 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29982
29983 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29984 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c
29985 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
29986 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
29987 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
29988 dword d;
29989 word w;
29990
29991 + pax_track_stack();
29992 +
29993 a = plci->adapter;
29994 Id = ((word)plci->Id<<8)|a->Id;
29995 PUT_WORD(&SS_Ind[4],0x0000);
29996 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
29997 word j, n, w;
29998 dword d;
29999
30000 + pax_track_stack();
30001 +
30002
30003 for(i=0;i<8;i++) bp_parms[i].length = 0;
30004 for(i=0;i<2;i++) global_config[i].length = 0;
30005 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30006 const byte llc3[] = {4,3,2,2,6,6,0};
30007 const byte header[] = {0,2,3,3,0,0,0};
30008
30009 + pax_track_stack();
30010 +
30011 for(i=0;i<8;i++) bp_parms[i].length = 0;
30012 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30013 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30014 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30015 word appl_number_group_type[MAX_APPL];
30016 PLCI *auxplci;
30017
30018 + pax_track_stack();
30019 +
30020 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30021
30022 if(!a->group_optimization_enabled)
30023 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c
30024 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30025 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30026 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30027 IDI_SYNC_REQ req;
30028 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30029
30030 + pax_track_stack();
30031 +
30032 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30033
30034 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30035 diff -urNp linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c
30036 --- linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30037 +++ linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30038 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30039 } iocpar;
30040 void __user *argp = (void __user *)arg;
30041
30042 + pax_track_stack();
30043 +
30044 #define name iocpar.name
30045 #define bname iocpar.bname
30046 #define iocts iocpar.iocts
30047 diff -urNp linux-2.6.32.41/drivers/isdn/icn/icn.c linux-2.6.32.41/drivers/isdn/icn/icn.c
30048 --- linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30049 +++ linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30050 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30051 if (count > len)
30052 count = len;
30053 if (user) {
30054 - if (copy_from_user(msg, buf, count))
30055 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30056 return -EFAULT;
30057 } else
30058 memcpy(msg, buf, count);
30059 diff -urNp linux-2.6.32.41/drivers/isdn/mISDN/socket.c linux-2.6.32.41/drivers/isdn/mISDN/socket.c
30060 --- linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30061 +++ linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30062 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30063 if (dev) {
30064 struct mISDN_devinfo di;
30065
30066 + memset(&di, 0, sizeof(di));
30067 di.id = dev->id;
30068 di.Dprotocols = dev->Dprotocols;
30069 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30070 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30071 if (dev) {
30072 struct mISDN_devinfo di;
30073
30074 + memset(&di, 0, sizeof(di));
30075 di.id = dev->id;
30076 di.Dprotocols = dev->Dprotocols;
30077 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
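
Both ioctl paths in socket.c now zero the on-stack struct mISDN_devinfo before filling it in, so struct padding and any members the code does not set are not handed to userspace as stale kernel stack; the ib_uverbs marshalling hunks earlier in this section zero reserved fields for the same reason. The general shape, with a hypothetical info structure:

    struct example_info {
            u32 id;
            u32 flags;
            char name[64];
    };

    static int fill_info(struct example_info __user *uinfo, u32 id, u32 flags)
    {
            struct example_info info;

            memset(&info, 0, sizeof(info)); /* nothing uninitialized reaches userspace */
            info.id = id;
            info.flags = flags;
            return copy_to_user(uinfo, &info, sizeof(info)) ? -EFAULT : 0;
    }
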
30078 diff -urNp linux-2.6.32.41/drivers/isdn/sc/interrupt.c linux-2.6.32.41/drivers/isdn/sc/interrupt.c
30079 --- linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30080 +++ linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30081 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30082 }
30083 else if(callid>=0x0000 && callid<=0x7FFF)
30084 {
30085 + int len;
30086 +
30087 pr_debug("%s: Got Incoming Call\n",
30088 sc_adapter[card]->devicename);
30089 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30090 - strcpy(setup.eazmsn,
30091 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30092 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30093 + sizeof(setup.phone));
30094 + if (len >= sizeof(setup.phone))
30095 + continue;
30096 + len = strlcpy(setup.eazmsn,
30097 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30098 + sizeof(setup.eazmsn));
30099 + if (len >= sizeof(setup.eazmsn))
30100 + continue;
30101 setup.si1 = 7;
30102 setup.si2 = 0;
30103 setup.plan = 0;
30104 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30105 * Handle a GetMyNumber Rsp
30106 */
30107 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30108 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30109 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30110 + rcvmsg.msg_data.byte_array,
30111 + sizeof(rcvmsg.msg_data.byte_array));
30112 continue;
30113 }
30114
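
The sc/interrupt.c hunks replace unbounded strcpy() into the fixed-size setup.phone, setup.eazmsn and channel .dn fields with strlcpy(), and in the incoming-call case skip the message entirely when the source would not fit: strlcpy() always NUL-terminates and returns the length of the source string, so a return value greater than or equal to the destination size signals truncation. The check in isolation, on a hypothetical destination:

    static int copy_number(char *dst, size_t dst_size, const char *src)
    {
            size_t len = strlcpy(dst, src, dst_size);

            if (len >= dst_size)
                    return -EOVERFLOW;      /* source too long: caller skips this entry */
            return 0;
    }
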
30115 diff -urNp linux-2.6.32.41/drivers/lguest/core.c linux-2.6.32.41/drivers/lguest/core.c
30116 --- linux-2.6.32.41/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30117 +++ linux-2.6.32.41/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30118 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30119 * it's worked so far. The end address needs +1 because __get_vm_area
30120 * allocates an extra guard page, so we need space for that.
30121 */
30122 +
30123 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30124 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30125 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30126 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30127 +#else
30128 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30129 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30130 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30131 +#endif
30132 +
30133 if (!switcher_vma) {
30134 err = -ENOMEM;
30135 printk("lguest: could not map switcher pages high\n");
30136 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30137 * Now the Switcher is mapped at the right address, we can't fail!
30138 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30139 */
30140 - memcpy(switcher_vma->addr, start_switcher_text,
30141 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30142 end_switcher_text - start_switcher_text);
30143
30144 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30145 diff -urNp linux-2.6.32.41/drivers/lguest/x86/core.c linux-2.6.32.41/drivers/lguest/x86/core.c
30146 --- linux-2.6.32.41/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30147 +++ linux-2.6.32.41/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30148 @@ -59,7 +59,7 @@ static struct {
30149 /* Offset from where switcher.S was compiled to where we've copied it */
30150 static unsigned long switcher_offset(void)
30151 {
30152 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30153 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30154 }
30155
30156 /* This cpu's struct lguest_pages. */
30157 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30158 * These copies are pretty cheap, so we do them unconditionally: */
30159 /* Save the current Host top-level page directory.
30160 */
30161 +
30162 +#ifdef CONFIG_PAX_PER_CPU_PGD
30163 + pages->state.host_cr3 = read_cr3();
30164 +#else
30165 pages->state.host_cr3 = __pa(current->mm->pgd);
30166 +#endif
30167 +
30168 /*
30169 * Set up the Guest's page tables to see this CPU's pages (and no
30170 * other CPU's pages).
30171 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30172 * compiled-in switcher code and the high-mapped copy we just made.
30173 */
30174 for (i = 0; i < IDT_ENTRIES; i++)
30175 - default_idt_entries[i] += switcher_offset();
30176 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30177
30178 /*
30179 * Set up the Switcher's per-cpu areas.
30180 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30181 * it will be undisturbed when we switch. To change %cs and jump we
30182 * need this structure to feed to Intel's "lcall" instruction.
30183 */
30184 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30185 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30186 lguest_entry.segment = LGUEST_CS;
30187
30188 /*
30189 diff -urNp linux-2.6.32.41/drivers/lguest/x86/switcher_32.S linux-2.6.32.41/drivers/lguest/x86/switcher_32.S
30190 --- linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30191 +++ linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30192 @@ -87,6 +87,7 @@
30193 #include <asm/page.h>
30194 #include <asm/segment.h>
30195 #include <asm/lguest.h>
30196 +#include <asm/processor-flags.h>
30197
30198 // We mark the start of the code to copy
30199 // It's placed in .text tho it's never run here
30200 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30201 // Changes type when we load it: damn Intel!
30202 // For after we switch over our page tables
30203 // That entry will be read-only: we'd crash.
30204 +
30205 +#ifdef CONFIG_PAX_KERNEXEC
30206 + mov %cr0, %edx
30207 + xor $X86_CR0_WP, %edx
30208 + mov %edx, %cr0
30209 +#endif
30210 +
30211 movl $(GDT_ENTRY_TSS*8), %edx
30212 ltr %dx
30213
30214 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30215 // Let's clear it again for our return.
30216 // The GDT descriptor of the Host
30217 // Points to the table after two "size" bytes
30218 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30219 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30220 // Clear "used" from type field (byte 5, bit 2)
30221 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30222 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30223 +
30224 +#ifdef CONFIG_PAX_KERNEXEC
30225 + mov %cr0, %eax
30226 + xor $X86_CR0_WP, %eax
30227 + mov %eax, %cr0
30228 +#endif
30229
30230 // Once our page table's switched, the Guest is live!
30231 // The Host fades as we run this final step.
30232 @@ -295,13 +309,12 @@ deliver_to_host:
30233 // I consulted gcc, and it gave
30234 // These instructions, which I gladly credit:
30235 leal (%edx,%ebx,8), %eax
30236 - movzwl (%eax),%edx
30237 - movl 4(%eax), %eax
30238 - xorw %ax, %ax
30239 - orl %eax, %edx
30240 + movl 4(%eax), %edx
30241 + movw (%eax), %dx
30242 // Now the address of the handler's in %edx
30243 // We call it now: its "iret" drops us home.
30244 - jmp *%edx
30245 + ljmp $__KERNEL_CS, $1f
30246 +1: jmp *%edx
30247
30248 // Every interrupt can come to us here
30249 // But we must truly tell each apart.
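
In the switcher assembly above, the KERNEXEC blocks temporarily clear the write-protect bit in CR0 around the two spots where the TSS descriptor's busy bit gets set (the ltr) and cleared (the andb), presumably because the descriptor table involved is kept read-only under KERNEXEC, and then flip the bit back. The same idea in C, using the usual cr0 accessors (a simplified illustration only; real code would also have to keep interrupts and preemption away while WP is clear):

    unsigned long cr0;

    cr0 = read_cr0();
    write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to otherwise read-only pages */
    /* ... modify the protected descriptor ... */
    write_cr0(cr0);                 /* restore write protection */
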
30250 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c
30251 --- linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30252 +++ linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30253 @@ -15,7 +15,7 @@
30254
30255 #define MAX_PMU_LEVEL 0xFF
30256
30257 -static struct backlight_ops pmu_backlight_data;
30258 +static const struct backlight_ops pmu_backlight_data;
30259 static DEFINE_SPINLOCK(pmu_backlight_lock);
30260 static int sleeping, uses_pmu_bl;
30261 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30262 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30263 return bd->props.brightness;
30264 }
30265
30266 -static struct backlight_ops pmu_backlight_data = {
30267 +static const struct backlight_ops pmu_backlight_data = {
30268 .get_brightness = pmu_backlight_get_brightness,
30269 .update_status = pmu_backlight_update_status,
30270
30271 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu.c linux-2.6.32.41/drivers/macintosh/via-pmu.c
30272 --- linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30273 +++ linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30274 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30275 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30276 }
30277
30278 -static struct platform_suspend_ops pmu_pm_ops = {
30279 +static const struct platform_suspend_ops pmu_pm_ops = {
30280 .enter = powerbook_sleep,
30281 .valid = pmu_sleep_valid,
30282 };
30283 diff -urNp linux-2.6.32.41/drivers/md/dm.c linux-2.6.32.41/drivers/md/dm.c
30284 --- linux-2.6.32.41/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30285 +++ linux-2.6.32.41/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30286 @@ -163,9 +163,9 @@ struct mapped_device {
30287 /*
30288 * Event handling.
30289 */
30290 - atomic_t event_nr;
30291 + atomic_unchecked_t event_nr;
30292 wait_queue_head_t eventq;
30293 - atomic_t uevent_seq;
30294 + atomic_unchecked_t uevent_seq;
30295 struct list_head uevent_list;
30296 spinlock_t uevent_lock; /* Protect access to uevent_list */
30297
30298 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30299 rwlock_init(&md->map_lock);
30300 atomic_set(&md->holders, 1);
30301 atomic_set(&md->open_count, 0);
30302 - atomic_set(&md->event_nr, 0);
30303 - atomic_set(&md->uevent_seq, 0);
30304 + atomic_set_unchecked(&md->event_nr, 0);
30305 + atomic_set_unchecked(&md->uevent_seq, 0);
30306 INIT_LIST_HEAD(&md->uevent_list);
30307 spin_lock_init(&md->uevent_lock);
30308
30309 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30310
30311 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30312
30313 - atomic_inc(&md->event_nr);
30314 + atomic_inc_unchecked(&md->event_nr);
30315 wake_up(&md->eventq);
30316 }
30317
30318 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30319
30320 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30321 {
30322 - return atomic_add_return(1, &md->uevent_seq);
30323 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30324 }
30325
30326 uint32_t dm_get_event_nr(struct mapped_device *md)
30327 {
30328 - return atomic_read(&md->event_nr);
30329 + return atomic_read_unchecked(&md->event_nr);
30330 }
30331
30332 int dm_wait_event(struct mapped_device *md, int event_nr)
30333 {
30334 return wait_event_interruptible(md->eventq,
30335 - (event_nr != atomic_read(&md->event_nr)));
30336 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30337 }
30338
30339 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30340 diff -urNp linux-2.6.32.41/drivers/md/dm-ioctl.c linux-2.6.32.41/drivers/md/dm-ioctl.c
30341 --- linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30342 +++ linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30343 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30344 cmd == DM_LIST_VERSIONS_CMD)
30345 return 0;
30346
30347 - if ((cmd == DM_DEV_CREATE_CMD)) {
30348 + if (cmd == DM_DEV_CREATE_CMD) {
30349 if (!*param->name) {
30350 DMWARN("name not supplied when creating device");
30351 return -EINVAL;
30352 diff -urNp linux-2.6.32.41/drivers/md/dm-raid1.c linux-2.6.32.41/drivers/md/dm-raid1.c
30353 --- linux-2.6.32.41/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30354 +++ linux-2.6.32.41/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30355 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30356
30357 struct mirror {
30358 struct mirror_set *ms;
30359 - atomic_t error_count;
30360 + atomic_unchecked_t error_count;
30361 unsigned long error_type;
30362 struct dm_dev *dev;
30363 sector_t offset;
30364 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30365 * simple way to tell if a device has encountered
30366 * errors.
30367 */
30368 - atomic_inc(&m->error_count);
30369 + atomic_inc_unchecked(&m->error_count);
30370
30371 if (test_and_set_bit(error_type, &m->error_type))
30372 return;
30373 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30374 }
30375
30376 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30377 - if (!atomic_read(&new->error_count)) {
30378 + if (!atomic_read_unchecked(&new->error_count)) {
30379 set_default_mirror(new);
30380 break;
30381 }
30382 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30383 struct mirror *m = get_default_mirror(ms);
30384
30385 do {
30386 - if (likely(!atomic_read(&m->error_count)))
30387 + if (likely(!atomic_read_unchecked(&m->error_count)))
30388 return m;
30389
30390 if (m-- == ms->mirror)
30391 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30392 {
30393 struct mirror *default_mirror = get_default_mirror(m->ms);
30394
30395 - return !atomic_read(&default_mirror->error_count);
30396 + return !atomic_read_unchecked(&default_mirror->error_count);
30397 }
30398
30399 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30400 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30401 */
30402 if (likely(region_in_sync(ms, region, 1)))
30403 m = choose_mirror(ms, bio->bi_sector);
30404 - else if (m && atomic_read(&m->error_count))
30405 + else if (m && atomic_read_unchecked(&m->error_count))
30406 m = NULL;
30407
30408 if (likely(m))
30409 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30410 }
30411
30412 ms->mirror[mirror].ms = ms;
30413 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30414 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30415 ms->mirror[mirror].error_type = 0;
30416 ms->mirror[mirror].offset = offset;
30417
30418 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30419 */
30420 static char device_status_char(struct mirror *m)
30421 {
30422 - if (!atomic_read(&(m->error_count)))
30423 + if (!atomic_read_unchecked(&(m->error_count)))
30424 return 'A';
30425
30426 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30427 diff -urNp linux-2.6.32.41/drivers/md/dm-stripe.c linux-2.6.32.41/drivers/md/dm-stripe.c
30428 --- linux-2.6.32.41/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30429 +++ linux-2.6.32.41/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30430 @@ -20,7 +20,7 @@ struct stripe {
30431 struct dm_dev *dev;
30432 sector_t physical_start;
30433
30434 - atomic_t error_count;
30435 + atomic_unchecked_t error_count;
30436 };
30437
30438 struct stripe_c {
30439 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30440 kfree(sc);
30441 return r;
30442 }
30443 - atomic_set(&(sc->stripe[i].error_count), 0);
30444 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30445 }
30446
30447 ti->private = sc;
30448 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30449 DMEMIT("%d ", sc->stripes);
30450 for (i = 0; i < sc->stripes; i++) {
30451 DMEMIT("%s ", sc->stripe[i].dev->name);
30452 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30453 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30454 'D' : 'A';
30455 }
30456 buffer[i] = '\0';
30457 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30458 */
30459 for (i = 0; i < sc->stripes; i++)
30460 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30461 - atomic_inc(&(sc->stripe[i].error_count));
30462 - if (atomic_read(&(sc->stripe[i].error_count)) <
30463 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30464 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30465 DM_IO_ERROR_THRESHOLD)
30466 queue_work(kstriped, &sc->kstriped_ws);
30467 }
30468 diff -urNp linux-2.6.32.41/drivers/md/dm-sysfs.c linux-2.6.32.41/drivers/md/dm-sysfs.c
30469 --- linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30470 +++ linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30471 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30472 NULL,
30473 };
30474
30475 -static struct sysfs_ops dm_sysfs_ops = {
30476 +static const struct sysfs_ops dm_sysfs_ops = {
30477 .show = dm_attr_show,
30478 };
30479
30480 diff -urNp linux-2.6.32.41/drivers/md/dm-table.c linux-2.6.32.41/drivers/md/dm-table.c
30481 --- linux-2.6.32.41/drivers/md/dm-table.c 2011-03-27 14:31:47.000000000 -0400
30482 +++ linux-2.6.32.41/drivers/md/dm-table.c 2011-04-17 15:56:46.000000000 -0400
30483 @@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
30484 if (!dev_size)
30485 return 0;
30486
30487 - if ((start >= dev_size) || (start + len > dev_size)) {
30488 + if ((start >= dev_size) || (len > dev_size - start)) {
30489 DMWARN("%s: %s too small for target: "
30490 "start=%llu, len=%llu, dev_size=%llu",
30491 dm_device_name(ti->table->md), bdevname(bdev, b),
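Annotation (not part of the patch): the dm-table hunk above rewrites the bounds check so it cannot be defeated by unsigned wraparound. start + len can overflow sector_t and compare as a small value, while dev_size - start is safe because start >= dev_size has already been rejected. A stand-alone user-space illustration:

/* why the reordered check matters: with unsigned sector counts the sum can
 * wrap past zero and slip under dev_size, so the sum-based test passes for
 * an out-of-range request; the subtraction form cannot wrap here. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_size = 1000, start = 500, len = UINT64_MAX - 100;

	/* old check: start + len wraps, area is (wrongly) reported as valid */
	printf("sum check flags invalid: %d\n",
	       (start >= dev_size) || (start + len > dev_size));
	/* new check: no wraparound, area is correctly flagged as invalid */
	printf("sub check flags invalid: %d\n",
	       (start >= dev_size) || (len > dev_size - start));
	return 0;
}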
30492 diff -urNp linux-2.6.32.41/drivers/md/md.c linux-2.6.32.41/drivers/md/md.c
30493 --- linux-2.6.32.41/drivers/md/md.c 2011-03-27 14:31:47.000000000 -0400
30494 +++ linux-2.6.32.41/drivers/md/md.c 2011-05-04 17:56:20.000000000 -0400
30495 @@ -153,10 +153,10 @@ static int start_readonly;
30496 * start build, activate spare
30497 */
30498 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30499 -static atomic_t md_event_count;
30500 +static atomic_unchecked_t md_event_count;
30501 void md_new_event(mddev_t *mddev)
30502 {
30503 - atomic_inc(&md_event_count);
30504 + atomic_inc_unchecked(&md_event_count);
30505 wake_up(&md_event_waiters);
30506 }
30507 EXPORT_SYMBOL_GPL(md_new_event);
30508 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30509 */
30510 static void md_new_event_inintr(mddev_t *mddev)
30511 {
30512 - atomic_inc(&md_event_count);
30513 + atomic_inc_unchecked(&md_event_count);
30514 wake_up(&md_event_waiters);
30515 }
30516
30517 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30518
30519 rdev->preferred_minor = 0xffff;
30520 rdev->data_offset = le64_to_cpu(sb->data_offset);
30521 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30522 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30523
30524 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30525 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30526 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30527 else
30528 sb->resync_offset = cpu_to_le64(0);
30529
30530 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30531 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30532
30533 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30534 sb->size = cpu_to_le64(mddev->dev_sectors);
30535 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30536 static ssize_t
30537 errors_show(mdk_rdev_t *rdev, char *page)
30538 {
30539 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30540 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30541 }
30542
30543 static ssize_t
30544 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30545 char *e;
30546 unsigned long n = simple_strtoul(buf, &e, 10);
30547 if (*buf && (*e == 0 || *e == '\n')) {
30548 - atomic_set(&rdev->corrected_errors, n);
30549 + atomic_set_unchecked(&rdev->corrected_errors, n);
30550 return len;
30551 }
30552 return -EINVAL;
30553 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30554 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30555 kfree(rdev);
30556 }
30557 -static struct sysfs_ops rdev_sysfs_ops = {
30558 +static const struct sysfs_ops rdev_sysfs_ops = {
30559 .show = rdev_attr_show,
30560 .store = rdev_attr_store,
30561 };
30562 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30563 rdev->data_offset = 0;
30564 rdev->sb_events = 0;
30565 atomic_set(&rdev->nr_pending, 0);
30566 - atomic_set(&rdev->read_errors, 0);
30567 - atomic_set(&rdev->corrected_errors, 0);
30568 + atomic_set_unchecked(&rdev->read_errors, 0);
30569 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30570
30571 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30572 if (!size) {
30573 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30574 kfree(mddev);
30575 }
30576
30577 -static struct sysfs_ops md_sysfs_ops = {
30578 +static const struct sysfs_ops md_sysfs_ops = {
30579 .show = md_attr_show,
30580 .store = md_attr_store,
30581 };
30582 @@ -4474,7 +4474,8 @@ out:
30583 err = 0;
30584 blk_integrity_unregister(disk);
30585 md_new_event(mddev);
30586 - sysfs_notify_dirent(mddev->sysfs_state);
30587 + if (mddev->sysfs_state)
30588 + sysfs_notify_dirent(mddev->sysfs_state);
30589 return err;
30590 }
30591
30592 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30593
30594 spin_unlock(&pers_lock);
30595 seq_printf(seq, "\n");
30596 - mi->event = atomic_read(&md_event_count);
30597 + mi->event = atomic_read_unchecked(&md_event_count);
30598 return 0;
30599 }
30600 if (v == (void*)2) {
30601 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30602 chunk_kb ? "KB" : "B");
30603 if (bitmap->file) {
30604 seq_printf(seq, ", file: ");
30605 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30606 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30607 }
30608
30609 seq_printf(seq, "\n");
30610 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30611 else {
30612 struct seq_file *p = file->private_data;
30613 p->private = mi;
30614 - mi->event = atomic_read(&md_event_count);
30615 + mi->event = atomic_read_unchecked(&md_event_count);
30616 }
30617 return error;
30618 }
30619 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30620 /* always allow read */
30621 mask = POLLIN | POLLRDNORM;
30622
30623 - if (mi->event != atomic_read(&md_event_count))
30624 + if (mi->event != atomic_read_unchecked(&md_event_count))
30625 mask |= POLLERR | POLLPRI;
30626 return mask;
30627 }
30628 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30629 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30630 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30631 (int)part_stat_read(&disk->part0, sectors[1]) -
30632 - atomic_read(&disk->sync_io);
30633 + atomic_read_unchecked(&disk->sync_io);
30634 /* sync IO will cause sync_io to increase before the disk_stats
30635 * as sync_io is counted when a request starts, and
30636 * disk_stats is counted when it completes.
30637 diff -urNp linux-2.6.32.41/drivers/md/md.h linux-2.6.32.41/drivers/md/md.h
30638 --- linux-2.6.32.41/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30639 +++ linux-2.6.32.41/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30640 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30641 * only maintained for arrays that
30642 * support hot removal
30643 */
30644 - atomic_t read_errors; /* number of consecutive read errors that
30645 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30646 * we have tried to ignore.
30647 */
30648 - atomic_t corrected_errors; /* number of corrected read errors,
30649 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30650 * for reporting to userspace and storing
30651 * in superblock.
30652 */
30653 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30654
30655 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30656 {
30657 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30658 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30659 }
30660
30661 struct mdk_personality
30662 diff -urNp linux-2.6.32.41/drivers/md/raid10.c linux-2.6.32.41/drivers/md/raid10.c
30663 --- linux-2.6.32.41/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30664 +++ linux-2.6.32.41/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30665 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30666 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30667 set_bit(R10BIO_Uptodate, &r10_bio->state);
30668 else {
30669 - atomic_add(r10_bio->sectors,
30670 + atomic_add_unchecked(r10_bio->sectors,
30671 &conf->mirrors[d].rdev->corrected_errors);
30672 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30673 md_error(r10_bio->mddev,
30674 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30675 test_bit(In_sync, &rdev->flags)) {
30676 atomic_inc(&rdev->nr_pending);
30677 rcu_read_unlock();
30678 - atomic_add(s, &rdev->corrected_errors);
30679 + atomic_add_unchecked(s, &rdev->corrected_errors);
30680 if (sync_page_io(rdev->bdev,
30681 r10_bio->devs[sl].addr +
30682 sect + rdev->data_offset,
30683 diff -urNp linux-2.6.32.41/drivers/md/raid1.c linux-2.6.32.41/drivers/md/raid1.c
30684 --- linux-2.6.32.41/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30685 +++ linux-2.6.32.41/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30686 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30687 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30688 continue;
30689 rdev = conf->mirrors[d].rdev;
30690 - atomic_add(s, &rdev->corrected_errors);
30691 + atomic_add_unchecked(s, &rdev->corrected_errors);
30692 if (sync_page_io(rdev->bdev,
30693 sect + rdev->data_offset,
30694 s<<9,
30695 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30696 /* Well, this device is dead */
30697 md_error(mddev, rdev);
30698 else {
30699 - atomic_add(s, &rdev->corrected_errors);
30700 + atomic_add_unchecked(s, &rdev->corrected_errors);
30701 printk(KERN_INFO
30702 "raid1:%s: read error corrected "
30703 "(%d sectors at %llu on %s)\n",
30704 diff -urNp linux-2.6.32.41/drivers/md/raid5.c linux-2.6.32.41/drivers/md/raid5.c
30705 --- linux-2.6.32.41/drivers/md/raid5.c 2011-03-27 14:31:47.000000000 -0400
30706 +++ linux-2.6.32.41/drivers/md/raid5.c 2011-05-16 21:46:57.000000000 -0400
30707 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30708 bi->bi_next = NULL;
30709 if (rw == WRITE &&
30710 test_bit(R5_ReWrite, &sh->dev[i].flags))
30711 - atomic_add(STRIPE_SECTORS,
30712 + atomic_add_unchecked(STRIPE_SECTORS,
30713 &rdev->corrected_errors);
30714 generic_make_request(bi);
30715 } else {
30716 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30717 clear_bit(R5_ReadError, &sh->dev[i].flags);
30718 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30719 }
30720 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30721 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30722 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30723 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30724 } else {
30725 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30726 int retry = 0;
30727 rdev = conf->disks[i].rdev;
30728
30729 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30730 - atomic_inc(&rdev->read_errors);
30731 + atomic_inc_unchecked(&rdev->read_errors);
30732 if (conf->mddev->degraded >= conf->max_degraded)
30733 printk_rl(KERN_WARNING
30734 "raid5:%s: read error not correctable "
30735 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30736 (unsigned long long)(sh->sector
30737 + rdev->data_offset),
30738 bdn);
30739 - else if (atomic_read(&rdev->read_errors)
30740 + else if (atomic_read_unchecked(&rdev->read_errors)
30741 > conf->max_nr_stripes)
30742 printk(KERN_WARNING
30743 "raid5:%s: Too many read errors, failing device %s.\n",
30744 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30745 sector_t r_sector;
30746 struct stripe_head sh2;
30747
30748 + pax_track_stack();
30749
30750 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30751 stripe = new_sector;
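Annotation (not part of the patch): the raid5 hunk above and many of the driver hunks below insert pax_track_stack() at the top of functions with large on-stack buffers. Under the PaX stack-leak feature the call records how deep the kernel stack got so it can be sanitised before returning to user space; with the feature disabled it should cost nothing. Assuming the usual option name, the declaration reduces to roughly:

#ifdef CONFIG_PAX_MEMORY_STACKLEAK
void pax_track_stack(void);	/* real definition lives elsewhere in the patch */
#else
static inline void pax_track_stack(void) { }
#endif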
30752 diff -urNp linux-2.6.32.41/drivers/media/common/saa7146_hlp.c linux-2.6.32.41/drivers/media/common/saa7146_hlp.c
30753 --- linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30754 +++ linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30755 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30756
30757 int x[32], y[32], w[32], h[32];
30758
30759 + pax_track_stack();
30760 +
30761 /* clear out memory */
30762 memset(&line_list[0], 0x00, sizeof(u32)*32);
30763 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30764 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30765 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30766 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30767 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30768 u8 buf[HOST_LINK_BUF_SIZE];
30769 int i;
30770
30771 + pax_track_stack();
30772 +
30773 dprintk("%s\n", __func__);
30774
30775 /* check if we have space for a link buf in the rx_buffer */
30776 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30777 unsigned long timeout;
30778 int written;
30779
30780 + pax_track_stack();
30781 +
30782 dprintk("%s\n", __func__);
30783
30784 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30785 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c
30786 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30787 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30788 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30789 const struct dvb_device *template, void *priv, int type)
30790 {
30791 struct dvb_device *dvbdev;
30792 + /* cannot be const */
30793 struct file_operations *dvbdevfops;
30794 struct device *clsdev;
30795 int minor;
30796 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c
30797 --- linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30798 +++ linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30799 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30800
30801 u8 buf[260];
30802
30803 + pax_track_stack();
30804 +
30805 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30806 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30807
30808 diff -urNp linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c
30809 --- linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30810 +++ linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30811 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30812 u8 tudata[585];
30813 int i;
30814
30815 + pax_track_stack();
30816 +
30817 dprintk("Firmware is %zd bytes\n",fw->size);
30818
30819 /* Get eprom data */
30820 diff -urNp linux-2.6.32.41/drivers/media/radio/radio-cadet.c linux-2.6.32.41/drivers/media/radio/radio-cadet.c
30821 --- linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30822 +++ linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30823 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30824 while (i < count && dev->rdsin != dev->rdsout)
30825 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30826
30827 - if (copy_to_user(data, readbuf, i))
30828 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30829 return -EFAULT;
30830 return i;
30831 }
30832 diff -urNp linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c
30833 --- linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30834 +++ linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30835 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30836
30837 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30838
30839 -static atomic_t cx18_instance = ATOMIC_INIT(0);
30840 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30841
30842 /* Parameter declarations */
30843 static int cardtype[CX18_MAX_CARDS];
30844 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30845 struct i2c_client c;
30846 u8 eedata[256];
30847
30848 + pax_track_stack();
30849 +
30850 memset(&c, 0, sizeof(c));
30851 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30852 c.adapter = &cx->i2c_adap[0];
30853 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30854 struct cx18 *cx;
30855
30856 /* FIXME - module parameter arrays constrain max instances */
30857 - i = atomic_inc_return(&cx18_instance) - 1;
30858 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30859 if (i >= CX18_MAX_CARDS) {
30860 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30861 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30862 diff -urNp linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c
30863 --- linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30864 +++ linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30865 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30866 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30867
30868 /* ivtv instance counter */
30869 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
30870 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30871
30872 /* Parameter declarations */
30873 static int cardtype[IVTV_MAX_CARDS];
30874 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.c linux-2.6.32.41/drivers/media/video/omap24xxcam.c
30875 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30876 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30877 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30878 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30879
30880 do_gettimeofday(&vb->ts);
30881 - vb->field_count = atomic_add_return(2, &fh->field_count);
30882 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30883 if (csr & csr_error) {
30884 vb->state = VIDEOBUF_ERROR;
30885 if (!atomic_read(&fh->cam->in_reset)) {
30886 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.h linux-2.6.32.41/drivers/media/video/omap24xxcam.h
30887 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30888 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30889 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30890 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30891 struct videobuf_queue vbq;
30892 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30893 - atomic_t field_count; /* field counter for videobuf_buffer */
30894 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
30895 /* accessing cam here doesn't need serialisation: it's constant */
30896 struct omap24xxcam_device *cam;
30897 };
30898 diff -urNp linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30899 --- linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
30900 +++ linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
30901 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30902 u8 *eeprom;
30903 struct tveeprom tvdata;
30904
30905 + pax_track_stack();
30906 +
30907 memset(&tvdata,0,sizeof(tvdata));
30908
30909 eeprom = pvr2_eeprom_fetch(hdw);
30910 diff -urNp linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c
30911 --- linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
30912 +++ linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
30913 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
30914 unsigned char localPAT[256];
30915 unsigned char localPMT[256];
30916
30917 + pax_track_stack();
30918 +
30919 /* Set video format - must be done first as it resets other settings */
30920 set_reg8(client, 0x41, h->video_format);
30921
30922 diff -urNp linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c
30923 --- linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
30924 +++ linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
30925 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30926 wait_queue_head_t *q = 0;
30927 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30928
30929 + pax_track_stack();
30930 +
30931 /* While any outstand message on the bus exists... */
30932 do {
30933
30934 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30935 u8 tmp[512];
30936 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30937
30938 + pax_track_stack();
30939 +
30940 while (loop) {
30941
30942 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
30943 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c
30944 --- linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
30945 +++ linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
30946 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
30947 int error;
30948
30949 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30950 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30951 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30952
30953 cam->input = input_dev = input_allocate_device();
30954 if (!input_dev) {
30955 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c
30956 --- linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
30957 +++ linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
30958 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
30959 int error;
30960
30961 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30962 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30963 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30964
30965 cam->input = input_dev = input_allocate_device();
30966 if (!input_dev) {
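Annotation (not part of the patch): the two webcam hunks above replace strncat with strlcat because strncat's size argument bounds how many bytes are appended, not the size of the destination, so passing sizeof(cam->input_physname) can still run past the end of an almost-full buffer. strlcat takes the total destination size and truncates. A user-space illustration of the bound strncat actually needs:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char phys[16] = "usb-0000:00:1d.";	/* 15 chars, buffer already full */

	/* strncat(phys, "/input0", sizeof(phys)) would be allowed to append up
	 * to 16 more bytes and overflow phys; the correct strncat bound is the
	 * remaining space, which strlcat computes for you from sizeof(phys): */
	strncat(phys, "/input0", sizeof(phys) - strlen(phys) - 1);
	printf("%s\n", phys);
	return 0;
}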
30967 diff -urNp linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c
30968 --- linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
30969 +++ linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
30970 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
30971 unsigned char rv, gv, bv;
30972 static unsigned char *Y, *U, *V;
30973
30974 + pax_track_stack();
30975 +
30976 frame = usbvision->curFrame;
30977 imageSize = frame->frmwidth * frame->frmheight;
30978 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30979 diff -urNp linux-2.6.32.41/drivers/media/video/v4l2-device.c linux-2.6.32.41/drivers/media/video/v4l2-device.c
30980 --- linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
30981 +++ linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
30982 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
30983 EXPORT_SYMBOL_GPL(v4l2_device_register);
30984
30985 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
30986 - atomic_t *instance)
30987 + atomic_unchecked_t *instance)
30988 {
30989 - int num = atomic_inc_return(instance) - 1;
30990 + int num = atomic_inc_return_unchecked(instance) - 1;
30991 int len = strlen(basename);
30992
30993 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
30994 diff -urNp linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c
30995 --- linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
30996 +++ linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
30997 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
30998 {
30999 struct videobuf_queue q;
31000
31001 + pax_track_stack();
31002 +
31003 /* Required to make generic handler to call __videobuf_alloc */
31004 q.int_ops = &sg_ops;
31005
31006 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptbase.c linux-2.6.32.41/drivers/message/fusion/mptbase.c
31007 --- linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31008 +++ linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31009 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31010 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31011 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31012
31013 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31014 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31015 + NULL, NULL);
31016 +#else
31017 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31018 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31019 +#endif
31020 +
31021 /*
31022 * Rounding UP to nearest 4-kB boundary here...
31023 */
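Annotation (not part of the patch): the mptbase hunk above prints NULL instead of the real request-frame addresses when GRKERNSEC_HIDESYM is enabled, so /proc readers cannot harvest kernel pointers. The same idea expressed as a hypothetical helper (hide_ptr is illustrative only, not from the patch):

#ifdef CONFIG_GRKERNSEC_HIDESYM
#define hide_ptr(p) ((const void *)NULL)	/* never leak the address */
#else
#define hide_ptr(p) ((const void *)(p))
#endif

/* e.g. seq_printf(seq, "RequestFrames @ 0x%p\n", hide_ptr(ioc->req_frames)); */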
31024 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptsas.c linux-2.6.32.41/drivers/message/fusion/mptsas.c
31025 --- linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31026 +++ linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31027 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31028 return 0;
31029 }
31030
31031 +static inline void
31032 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31033 +{
31034 + if (phy_info->port_details) {
31035 + phy_info->port_details->rphy = rphy;
31036 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31037 + ioc->name, rphy));
31038 + }
31039 +
31040 + if (rphy) {
31041 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31042 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31043 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31044 + ioc->name, rphy, rphy->dev.release));
31045 + }
31046 +}
31047 +
31048 /* no mutex */
31049 static void
31050 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31051 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31052 return NULL;
31053 }
31054
31055 -static inline void
31056 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31057 -{
31058 - if (phy_info->port_details) {
31059 - phy_info->port_details->rphy = rphy;
31060 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31061 - ioc->name, rphy));
31062 - }
31063 -
31064 - if (rphy) {
31065 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31066 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31067 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31068 - ioc->name, rphy, rphy->dev.release));
31069 - }
31070 -}
31071 -
31072 static inline struct sas_port *
31073 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31074 {
31075 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptscsih.c linux-2.6.32.41/drivers/message/fusion/mptscsih.c
31076 --- linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31077 +++ linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31078 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31079
31080 h = shost_priv(SChost);
31081
31082 - if (h) {
31083 - if (h->info_kbuf == NULL)
31084 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31085 - return h->info_kbuf;
31086 - h->info_kbuf[0] = '\0';
31087 + if (!h)
31088 + return NULL;
31089
31090 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31091 - h->info_kbuf[size-1] = '\0';
31092 - }
31093 + if (h->info_kbuf == NULL)
31094 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31095 + return h->info_kbuf;
31096 + h->info_kbuf[0] = '\0';
31097 +
31098 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31099 + h->info_kbuf[size-1] = '\0';
31100
31101 return h->info_kbuf;
31102 }
31103 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_config.c linux-2.6.32.41/drivers/message/i2o/i2o_config.c
31104 --- linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31105 +++ linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31106 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31107 struct i2o_message *msg;
31108 unsigned int iop;
31109
31110 + pax_track_stack();
31111 +
31112 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31113 return -EFAULT;
31114
31115 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_proc.c linux-2.6.32.41/drivers/message/i2o/i2o_proc.c
31116 --- linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31117 +++ linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31118 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31119 "Array Controller Device"
31120 };
31121
31122 -static char *chtostr(u8 * chars, int n)
31123 -{
31124 - char tmp[256];
31125 - tmp[0] = 0;
31126 - return strncat(tmp, (char *)chars, n);
31127 -}
31128 -
31129 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31130 char *group)
31131 {
31132 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31133
31134 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31135 seq_printf(seq, "%-#8x", ddm_table.module_id);
31136 - seq_printf(seq, "%-29s",
31137 - chtostr(ddm_table.module_name_version, 28));
31138 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31139 seq_printf(seq, "%9d ", ddm_table.data_size);
31140 seq_printf(seq, "%8d", ddm_table.code_size);
31141
31142 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31143
31144 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31145 seq_printf(seq, "%-#8x", dst->module_id);
31146 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31147 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31148 + seq_printf(seq, "%-.28s", dst->module_name_version);
31149 + seq_printf(seq, "%-.8s", dst->date);
31150 seq_printf(seq, "%8d ", dst->module_size);
31151 seq_printf(seq, "%8d ", dst->mpb_size);
31152 seq_printf(seq, "0x%04x", dst->module_flags);
31153 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31154 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31155 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31156 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31157 - seq_printf(seq, "Vendor info : %s\n",
31158 - chtostr((u8 *) (work32 + 2), 16));
31159 - seq_printf(seq, "Product info : %s\n",
31160 - chtostr((u8 *) (work32 + 6), 16));
31161 - seq_printf(seq, "Description : %s\n",
31162 - chtostr((u8 *) (work32 + 10), 16));
31163 - seq_printf(seq, "Product rev. : %s\n",
31164 - chtostr((u8 *) (work32 + 14), 8));
31165 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31166 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31167 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31168 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31169
31170 seq_printf(seq, "Serial number : ");
31171 print_serial_number(seq, (u8 *) (work32 + 16),
31172 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31173 }
31174
31175 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31176 - seq_printf(seq, "Module name : %s\n",
31177 - chtostr(result.module_name, 24));
31178 - seq_printf(seq, "Module revision : %s\n",
31179 - chtostr(result.module_rev, 8));
31180 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31181 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31182
31183 seq_printf(seq, "Serial number : ");
31184 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31185 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31186 return 0;
31187 }
31188
31189 - seq_printf(seq, "Device name : %s\n",
31190 - chtostr(result.device_name, 64));
31191 - seq_printf(seq, "Service name : %s\n",
31192 - chtostr(result.service_name, 64));
31193 - seq_printf(seq, "Physical name : %s\n",
31194 - chtostr(result.physical_location, 64));
31195 - seq_printf(seq, "Instance number : %s\n",
31196 - chtostr(result.instance_number, 4));
31197 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31198 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31199 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31200 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31201
31202 return 0;
31203 }
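Annotation (not part of the patch): the i2o_proc hunks above drop the chtostr() helper, which copied fixed-width ID fields through a 256-byte stack buffer with strncat, and instead print them directly with a %.Ns precision. The precision caps how many characters printf reads, so the source field does not even need to be NUL-terminated:

#include <stdio.h>

int main(void)
{
	/* fixed-width field with no terminating NUL, as in the I2O tables */
	char module_name[8] = { 'i', '2', 'o', '_', 'b', 'l', 'k', 'd' };

	printf("Module name : %.8s\n", module_name);	/* reads at most 8 chars */
	return 0;
}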
31204 diff -urNp linux-2.6.32.41/drivers/message/i2o/iop.c linux-2.6.32.41/drivers/message/i2o/iop.c
31205 --- linux-2.6.32.41/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31206 +++ linux-2.6.32.41/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31207 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31208
31209 spin_lock_irqsave(&c->context_list_lock, flags);
31210
31211 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31212 - atomic_inc(&c->context_list_counter);
31213 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31214 + atomic_inc_unchecked(&c->context_list_counter);
31215
31216 - entry->context = atomic_read(&c->context_list_counter);
31217 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31218
31219 list_add(&entry->list, &c->context_list);
31220
31221 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31222
31223 #if BITS_PER_LONG == 64
31224 spin_lock_init(&c->context_list_lock);
31225 - atomic_set(&c->context_list_counter, 0);
31226 + atomic_set_unchecked(&c->context_list_counter, 0);
31227 INIT_LIST_HEAD(&c->context_list);
31228 #endif
31229
31230 diff -urNp linux-2.6.32.41/drivers/mfd/wm8350-i2c.c linux-2.6.32.41/drivers/mfd/wm8350-i2c.c
31231 --- linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31232 +++ linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31233 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31234 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31235 int ret;
31236
31237 + pax_track_stack();
31238 +
31239 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31240 return -EINVAL;
31241
31242 diff -urNp linux-2.6.32.41/drivers/misc/kgdbts.c linux-2.6.32.41/drivers/misc/kgdbts.c
31243 --- linux-2.6.32.41/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31244 +++ linux-2.6.32.41/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31245 @@ -118,7 +118,7 @@
31246 } while (0)
31247 #define MAX_CONFIG_LEN 40
31248
31249 -static struct kgdb_io kgdbts_io_ops;
31250 +static const struct kgdb_io kgdbts_io_ops;
31251 static char get_buf[BUFMAX];
31252 static int get_buf_cnt;
31253 static char put_buf[BUFMAX];
31254 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31255 module_put(THIS_MODULE);
31256 }
31257
31258 -static struct kgdb_io kgdbts_io_ops = {
31259 +static const struct kgdb_io kgdbts_io_ops = {
31260 .name = "kgdbts",
31261 .read_char = kgdbts_get_char,
31262 .write_char = kgdbts_put_char,
31263 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c
31264 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31265 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31266 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31267
31268 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31269 {
31270 - atomic_long_inc(&mcs_op_statistics[op].count);
31271 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31272 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31273 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31274 if (mcs_op_statistics[op].max < clks)
31275 mcs_op_statistics[op].max = clks;
31276 }
31277 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c
31278 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31279 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31280 @@ -32,9 +32,9 @@
31281
31282 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31283
31284 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31285 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31286 {
31287 - unsigned long val = atomic_long_read(v);
31288 + unsigned long val = atomic_long_read_unchecked(v);
31289
31290 if (val)
31291 seq_printf(s, "%16lu %s\n", val, id);
31292 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31293 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31294
31295 for (op = 0; op < mcsop_last; op++) {
31296 - count = atomic_long_read(&mcs_op_statistics[op].count);
31297 - total = atomic_long_read(&mcs_op_statistics[op].total);
31298 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31299 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31300 max = mcs_op_statistics[op].max;
31301 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31302 count ? total / count : 0, max);
31303 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h
31304 --- linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31305 +++ linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31306 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31307 * GRU statistics.
31308 */
31309 struct gru_stats_s {
31310 - atomic_long_t vdata_alloc;
31311 - atomic_long_t vdata_free;
31312 - atomic_long_t gts_alloc;
31313 - atomic_long_t gts_free;
31314 - atomic_long_t vdata_double_alloc;
31315 - atomic_long_t gts_double_allocate;
31316 - atomic_long_t assign_context;
31317 - atomic_long_t assign_context_failed;
31318 - atomic_long_t free_context;
31319 - atomic_long_t load_user_context;
31320 - atomic_long_t load_kernel_context;
31321 - atomic_long_t lock_kernel_context;
31322 - atomic_long_t unlock_kernel_context;
31323 - atomic_long_t steal_user_context;
31324 - atomic_long_t steal_kernel_context;
31325 - atomic_long_t steal_context_failed;
31326 - atomic_long_t nopfn;
31327 - atomic_long_t break_cow;
31328 - atomic_long_t asid_new;
31329 - atomic_long_t asid_next;
31330 - atomic_long_t asid_wrap;
31331 - atomic_long_t asid_reuse;
31332 - atomic_long_t intr;
31333 - atomic_long_t intr_mm_lock_failed;
31334 - atomic_long_t call_os;
31335 - atomic_long_t call_os_offnode_reference;
31336 - atomic_long_t call_os_check_for_bug;
31337 - atomic_long_t call_os_wait_queue;
31338 - atomic_long_t user_flush_tlb;
31339 - atomic_long_t user_unload_context;
31340 - atomic_long_t user_exception;
31341 - atomic_long_t set_context_option;
31342 - atomic_long_t migrate_check;
31343 - atomic_long_t migrated_retarget;
31344 - atomic_long_t migrated_unload;
31345 - atomic_long_t migrated_unload_delay;
31346 - atomic_long_t migrated_nopfn_retarget;
31347 - atomic_long_t migrated_nopfn_unload;
31348 - atomic_long_t tlb_dropin;
31349 - atomic_long_t tlb_dropin_fail_no_asid;
31350 - atomic_long_t tlb_dropin_fail_upm;
31351 - atomic_long_t tlb_dropin_fail_invalid;
31352 - atomic_long_t tlb_dropin_fail_range_active;
31353 - atomic_long_t tlb_dropin_fail_idle;
31354 - atomic_long_t tlb_dropin_fail_fmm;
31355 - atomic_long_t tlb_dropin_fail_no_exception;
31356 - atomic_long_t tlb_dropin_fail_no_exception_war;
31357 - atomic_long_t tfh_stale_on_fault;
31358 - atomic_long_t mmu_invalidate_range;
31359 - atomic_long_t mmu_invalidate_page;
31360 - atomic_long_t mmu_clear_flush_young;
31361 - atomic_long_t flush_tlb;
31362 - atomic_long_t flush_tlb_gru;
31363 - atomic_long_t flush_tlb_gru_tgh;
31364 - atomic_long_t flush_tlb_gru_zero_asid;
31365 -
31366 - atomic_long_t copy_gpa;
31367 -
31368 - atomic_long_t mesq_receive;
31369 - atomic_long_t mesq_receive_none;
31370 - atomic_long_t mesq_send;
31371 - atomic_long_t mesq_send_failed;
31372 - atomic_long_t mesq_noop;
31373 - atomic_long_t mesq_send_unexpected_error;
31374 - atomic_long_t mesq_send_lb_overflow;
31375 - atomic_long_t mesq_send_qlimit_reached;
31376 - atomic_long_t mesq_send_amo_nacked;
31377 - atomic_long_t mesq_send_put_nacked;
31378 - atomic_long_t mesq_qf_not_full;
31379 - atomic_long_t mesq_qf_locked;
31380 - atomic_long_t mesq_qf_noop_not_full;
31381 - atomic_long_t mesq_qf_switch_head_failed;
31382 - atomic_long_t mesq_qf_unexpected_error;
31383 - atomic_long_t mesq_noop_unexpected_error;
31384 - atomic_long_t mesq_noop_lb_overflow;
31385 - atomic_long_t mesq_noop_qlimit_reached;
31386 - atomic_long_t mesq_noop_amo_nacked;
31387 - atomic_long_t mesq_noop_put_nacked;
31388 + atomic_long_unchecked_t vdata_alloc;
31389 + atomic_long_unchecked_t vdata_free;
31390 + atomic_long_unchecked_t gts_alloc;
31391 + atomic_long_unchecked_t gts_free;
31392 + atomic_long_unchecked_t vdata_double_alloc;
31393 + atomic_long_unchecked_t gts_double_allocate;
31394 + atomic_long_unchecked_t assign_context;
31395 + atomic_long_unchecked_t assign_context_failed;
31396 + atomic_long_unchecked_t free_context;
31397 + atomic_long_unchecked_t load_user_context;
31398 + atomic_long_unchecked_t load_kernel_context;
31399 + atomic_long_unchecked_t lock_kernel_context;
31400 + atomic_long_unchecked_t unlock_kernel_context;
31401 + atomic_long_unchecked_t steal_user_context;
31402 + atomic_long_unchecked_t steal_kernel_context;
31403 + atomic_long_unchecked_t steal_context_failed;
31404 + atomic_long_unchecked_t nopfn;
31405 + atomic_long_unchecked_t break_cow;
31406 + atomic_long_unchecked_t asid_new;
31407 + atomic_long_unchecked_t asid_next;
31408 + atomic_long_unchecked_t asid_wrap;
31409 + atomic_long_unchecked_t asid_reuse;
31410 + atomic_long_unchecked_t intr;
31411 + atomic_long_unchecked_t intr_mm_lock_failed;
31412 + atomic_long_unchecked_t call_os;
31413 + atomic_long_unchecked_t call_os_offnode_reference;
31414 + atomic_long_unchecked_t call_os_check_for_bug;
31415 + atomic_long_unchecked_t call_os_wait_queue;
31416 + atomic_long_unchecked_t user_flush_tlb;
31417 + atomic_long_unchecked_t user_unload_context;
31418 + atomic_long_unchecked_t user_exception;
31419 + atomic_long_unchecked_t set_context_option;
31420 + atomic_long_unchecked_t migrate_check;
31421 + atomic_long_unchecked_t migrated_retarget;
31422 + atomic_long_unchecked_t migrated_unload;
31423 + atomic_long_unchecked_t migrated_unload_delay;
31424 + atomic_long_unchecked_t migrated_nopfn_retarget;
31425 + atomic_long_unchecked_t migrated_nopfn_unload;
31426 + atomic_long_unchecked_t tlb_dropin;
31427 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31428 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31429 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31430 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31431 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31432 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31433 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31434 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31435 + atomic_long_unchecked_t tfh_stale_on_fault;
31436 + atomic_long_unchecked_t mmu_invalidate_range;
31437 + atomic_long_unchecked_t mmu_invalidate_page;
31438 + atomic_long_unchecked_t mmu_clear_flush_young;
31439 + atomic_long_unchecked_t flush_tlb;
31440 + atomic_long_unchecked_t flush_tlb_gru;
31441 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31442 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31443 +
31444 + atomic_long_unchecked_t copy_gpa;
31445 +
31446 + atomic_long_unchecked_t mesq_receive;
31447 + atomic_long_unchecked_t mesq_receive_none;
31448 + atomic_long_unchecked_t mesq_send;
31449 + atomic_long_unchecked_t mesq_send_failed;
31450 + atomic_long_unchecked_t mesq_noop;
31451 + atomic_long_unchecked_t mesq_send_unexpected_error;
31452 + atomic_long_unchecked_t mesq_send_lb_overflow;
31453 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31454 + atomic_long_unchecked_t mesq_send_amo_nacked;
31455 + atomic_long_unchecked_t mesq_send_put_nacked;
31456 + atomic_long_unchecked_t mesq_qf_not_full;
31457 + atomic_long_unchecked_t mesq_qf_locked;
31458 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31459 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31460 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31461 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31462 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31463 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31464 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31465 + atomic_long_unchecked_t mesq_noop_put_nacked;
31466
31467 };
31468
31469 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31470 cchop_deallocate, tghop_invalidate, mcsop_last};
31471
31472 struct mcs_op_statistic {
31473 - atomic_long_t count;
31474 - atomic_long_t total;
31475 + atomic_long_unchecked_t count;
31476 + atomic_long_unchecked_t total;
31477 unsigned long max;
31478 };
31479
31480 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31481
31482 #define STAT(id) do { \
31483 if (gru_options & OPT_STATS) \
31484 - atomic_long_inc(&gru_stats.id); \
31485 + atomic_long_inc_unchecked(&gru_stats.id); \
31486 } while (0)
31487
31488 #ifdef CONFIG_SGI_GRU_DEBUG
31489 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c
31490 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31491 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31492 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31493 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31494 unsigned long timeo = jiffies + HZ;
31495
31496 + pax_track_stack();
31497 +
31498 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31499 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31500 goto sleep;
31501 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31502 unsigned long initial_adr;
31503 int initial_len = len;
31504
31505 + pax_track_stack();
31506 +
31507 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31508 adr += chip->start;
31509 initial_adr = adr;
31510 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31511 int retries = 3;
31512 int ret;
31513
31514 + pax_track_stack();
31515 +
31516 adr += chip->start;
31517
31518 retry:
31519 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c
31520 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31521 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31522 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31523 unsigned long cmd_addr;
31524 struct cfi_private *cfi = map->fldrv_priv;
31525
31526 + pax_track_stack();
31527 +
31528 adr += chip->start;
31529
31530 /* Ensure cmd read/writes are aligned. */
31531 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31532 DECLARE_WAITQUEUE(wait, current);
31533 int wbufsize, z;
31534
31535 + pax_track_stack();
31536 +
31537 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31538 if (adr & (map_bankwidth(map)-1))
31539 return -EINVAL;
31540 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31541 DECLARE_WAITQUEUE(wait, current);
31542 int ret = 0;
31543
31544 + pax_track_stack();
31545 +
31546 adr += chip->start;
31547
31548 /* Let's determine this according to the interleave only once */
31549 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31550 unsigned long timeo = jiffies + HZ;
31551 DECLARE_WAITQUEUE(wait, current);
31552
31553 + pax_track_stack();
31554 +
31555 adr += chip->start;
31556
31557 /* Let's determine this according to the interleave only once */
31558 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31559 unsigned long timeo = jiffies + HZ;
31560 DECLARE_WAITQUEUE(wait, current);
31561
31562 + pax_track_stack();
31563 +
31564 adr += chip->start;
31565
31566 /* Let's determine this according to the interleave only once */
31567 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2000.c linux-2.6.32.41/drivers/mtd/devices/doc2000.c
31568 --- linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31569 +++ linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31570 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31571
31572 /* The ECC will not be calculated correctly if less than 512 is written */
31573 /* DBB-
31574 - if (len != 0x200 && eccbuf)
31575 + if (len != 0x200)
31576 printk(KERN_WARNING
31577 "ECC needs a full sector write (adr: %lx size %lx)\n",
31578 (long) to, (long) len);
31579 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2001.c linux-2.6.32.41/drivers/mtd/devices/doc2001.c
31580 --- linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31581 +++ linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31582 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31583 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31584
31585 /* Don't allow read past end of device */
31586 - if (from >= this->totlen)
31587 + if (from >= this->totlen || !len)
31588 return -EINVAL;
31589
31590 /* Don't allow a single read to cross a 512-byte block boundary */
31591 diff -urNp linux-2.6.32.41/drivers/mtd/ftl.c linux-2.6.32.41/drivers/mtd/ftl.c
31592 --- linux-2.6.32.41/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31593 +++ linux-2.6.32.41/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31594 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31595 loff_t offset;
31596 uint16_t srcunitswap = cpu_to_le16(srcunit);
31597
31598 + pax_track_stack();
31599 +
31600 eun = &part->EUNInfo[srcunit];
31601 xfer = &part->XferInfo[xferunit];
31602 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31603 diff -urNp linux-2.6.32.41/drivers/mtd/inftlcore.c linux-2.6.32.41/drivers/mtd/inftlcore.c
31604 --- linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31605 +++ linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31606 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31607 struct inftl_oob oob;
31608 size_t retlen;
31609
31610 + pax_track_stack();
31611 +
31612 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31613 "pending=%d)\n", inftl, thisVUC, pendingblock);
31614
31615 diff -urNp linux-2.6.32.41/drivers/mtd/inftlmount.c linux-2.6.32.41/drivers/mtd/inftlmount.c
31616 --- linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31617 +++ linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31618 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31619 struct INFTLPartition *ip;
31620 size_t retlen;
31621
31622 + pax_track_stack();
31623 +
31624 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31625
31626 /*
31627 diff -urNp linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c
31628 --- linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31629 +++ linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31630 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31631 {
31632 map_word pfow_val[4];
31633
31634 + pax_track_stack();
31635 +
31636 /* Check identification string */
31637 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31638 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31639 diff -urNp linux-2.6.32.41/drivers/mtd/mtdchar.c linux-2.6.32.41/drivers/mtd/mtdchar.c
31640 --- linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31641 +++ linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31642 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31643 u_long size;
31644 struct mtd_info_user info;
31645
31646 + pax_track_stack();
31647 +
31648 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31649
31650 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31651 diff -urNp linux-2.6.32.41/drivers/mtd/nftlcore.c linux-2.6.32.41/drivers/mtd/nftlcore.c
31652 --- linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31653 +++ linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31654 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31655 int inplace = 1;
31656 size_t retlen;
31657
31658 + pax_track_stack();
31659 +
31660 memset(BlockMap, 0xff, sizeof(BlockMap));
31661 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31662
31663 diff -urNp linux-2.6.32.41/drivers/mtd/nftlmount.c linux-2.6.32.41/drivers/mtd/nftlmount.c
31664 --- linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31665 +++ linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31666 @@ -23,6 +23,7 @@
31667 #include <asm/errno.h>
31668 #include <linux/delay.h>
31669 #include <linux/slab.h>
31670 +#include <linux/sched.h>
31671 #include <linux/mtd/mtd.h>
31672 #include <linux/mtd/nand.h>
31673 #include <linux/mtd/nftl.h>
31674 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31675 struct mtd_info *mtd = nftl->mbd.mtd;
31676 unsigned int i;
31677
31678 + pax_track_stack();
31679 +
31680 /* Assume logical EraseSize == physical erasesize for starting the scan.
31681 We'll sort it out later if we find a MediaHeader which says otherwise */
31682 /* Actually, we won't. The new DiskOnChip driver has already scanned
31683 diff -urNp linux-2.6.32.41/drivers/mtd/ubi/build.c linux-2.6.32.41/drivers/mtd/ubi/build.c
31684 --- linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31685 +++ linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31686 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31687 static int __init bytes_str_to_int(const char *str)
31688 {
31689 char *endp;
31690 - unsigned long result;
31691 + unsigned long result, scale = 1;
31692
31693 result = simple_strtoul(str, &endp, 0);
31694 if (str == endp || result >= INT_MAX) {
31695 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31696
31697 switch (*endp) {
31698 case 'G':
31699 - result *= 1024;
31700 + scale *= 1024;
31701 case 'M':
31702 - result *= 1024;
31703 + scale *= 1024;
31704 case 'K':
31705 - result *= 1024;
31706 + scale *= 1024;
31707 if (endp[1] == 'i' && endp[2] == 'B')
31708 endp += 2;
31709 case '\0':
31710 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31711 return -EINVAL;
31712 }
31713
31714 - return result;
31715 + if ((intoverflow_t)result*scale >= INT_MAX) {
31716 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31717 + str);
31718 + return -EINVAL;
31719 + }
31720 +
31721 + return result*scale;
31722 }
31723
31724 /**
31725 diff -urNp linux-2.6.32.41/drivers/net/bnx2.c linux-2.6.32.41/drivers/net/bnx2.c
31726 --- linux-2.6.32.41/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31727 +++ linux-2.6.32.41/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31728 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31729 int rc = 0;
31730 u32 magic, csum;
31731
31732 + pax_track_stack();
31733 +
31734 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31735 goto test_nvram_done;
31736
31737 diff -urNp linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c
31738 --- linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31739 +++ linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31740 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31741 int i, addr, ret;
31742 struct t3_vpd vpd;
31743
31744 + pax_track_stack();
31745 +
31746 /*
31747 * Card information is normally at VPD_BASE but some early cards had
31748 * it at 0.
31749 diff -urNp linux-2.6.32.41/drivers/net/e1000e/82571.c linux-2.6.32.41/drivers/net/e1000e/82571.c
31750 --- linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31751 +++ linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31752 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31753 {
31754 struct e1000_hw *hw = &adapter->hw;
31755 struct e1000_mac_info *mac = &hw->mac;
31756 + /* cannot be const */
31757 struct e1000_mac_operations *func = &mac->ops;
31758 u32 swsm = 0;
31759 u32 swsm2 = 0;
31760 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31761 temp = er32(ICRXDMTC);
31762 }
31763
31764 -static struct e1000_mac_operations e82571_mac_ops = {
31765 +static const struct e1000_mac_operations e82571_mac_ops = {
31766 /* .check_mng_mode: mac type dependent */
31767 /* .check_for_link: media type dependent */
31768 .id_led_init = e1000e_id_led_init,
31769 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31770 .setup_led = e1000e_setup_led_generic,
31771 };
31772
31773 -static struct e1000_phy_operations e82_phy_ops_igp = {
31774 +static const struct e1000_phy_operations e82_phy_ops_igp = {
31775 .acquire_phy = e1000_get_hw_semaphore_82571,
31776 .check_reset_block = e1000e_check_reset_block_generic,
31777 .commit_phy = NULL,
31778 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31779 .cfg_on_link_up = NULL,
31780 };
31781
31782 -static struct e1000_phy_operations e82_phy_ops_m88 = {
31783 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
31784 .acquire_phy = e1000_get_hw_semaphore_82571,
31785 .check_reset_block = e1000e_check_reset_block_generic,
31786 .commit_phy = e1000e_phy_sw_reset,
31787 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31788 .cfg_on_link_up = NULL,
31789 };
31790
31791 -static struct e1000_phy_operations e82_phy_ops_bm = {
31792 +static const struct e1000_phy_operations e82_phy_ops_bm = {
31793 .acquire_phy = e1000_get_hw_semaphore_82571,
31794 .check_reset_block = e1000e_check_reset_block_generic,
31795 .commit_phy = e1000e_phy_sw_reset,
31796 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31797 .cfg_on_link_up = NULL,
31798 };
31799
31800 -static struct e1000_nvm_operations e82571_nvm_ops = {
31801 +static const struct e1000_nvm_operations e82571_nvm_ops = {
31802 .acquire_nvm = e1000_acquire_nvm_82571,
31803 .read_nvm = e1000e_read_nvm_eerd,
31804 .release_nvm = e1000_release_nvm_82571,
31805 diff -urNp linux-2.6.32.41/drivers/net/e1000e/e1000.h linux-2.6.32.41/drivers/net/e1000e/e1000.h
31806 --- linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31807 +++ linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31808 @@ -375,9 +375,9 @@ struct e1000_info {
31809 u32 pba;
31810 u32 max_hw_frame_size;
31811 s32 (*get_variants)(struct e1000_adapter *);
31812 - struct e1000_mac_operations *mac_ops;
31813 - struct e1000_phy_operations *phy_ops;
31814 - struct e1000_nvm_operations *nvm_ops;
31815 + const struct e1000_mac_operations *mac_ops;
31816 + const struct e1000_phy_operations *phy_ops;
31817 + const struct e1000_nvm_operations *nvm_ops;
31818 };
31819
31820 /* hardware capability, feature, and workaround flags */
31821 diff -urNp linux-2.6.32.41/drivers/net/e1000e/es2lan.c linux-2.6.32.41/drivers/net/e1000e/es2lan.c
31822 --- linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31823 +++ linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31824 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31825 {
31826 struct e1000_hw *hw = &adapter->hw;
31827 struct e1000_mac_info *mac = &hw->mac;
31828 + /* cannot be const */
31829 struct e1000_mac_operations *func = &mac->ops;
31830
31831 /* Set media type */
31832 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31833 temp = er32(ICRXDMTC);
31834 }
31835
31836 -static struct e1000_mac_operations es2_mac_ops = {
31837 +static const struct e1000_mac_operations es2_mac_ops = {
31838 .id_led_init = e1000e_id_led_init,
31839 .check_mng_mode = e1000e_check_mng_mode_generic,
31840 /* check_for_link dependent on media type */
31841 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31842 .setup_led = e1000e_setup_led_generic,
31843 };
31844
31845 -static struct e1000_phy_operations es2_phy_ops = {
31846 +static const struct e1000_phy_operations es2_phy_ops = {
31847 .acquire_phy = e1000_acquire_phy_80003es2lan,
31848 .check_reset_block = e1000e_check_reset_block_generic,
31849 .commit_phy = e1000e_phy_sw_reset,
31850 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31851 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31852 };
31853
31854 -static struct e1000_nvm_operations es2_nvm_ops = {
31855 +static const struct e1000_nvm_operations es2_nvm_ops = {
31856 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31857 .read_nvm = e1000e_read_nvm_eerd,
31858 .release_nvm = e1000_release_nvm_80003es2lan,
31859 diff -urNp linux-2.6.32.41/drivers/net/e1000e/hw.h linux-2.6.32.41/drivers/net/e1000e/hw.h
31860 --- linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31861 +++ linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31862 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
31863
31864 /* Function pointers for the PHY. */
31865 struct e1000_phy_operations {
31866 - s32 (*acquire_phy)(struct e1000_hw *);
31867 - s32 (*check_polarity)(struct e1000_hw *);
31868 - s32 (*check_reset_block)(struct e1000_hw *);
31869 - s32 (*commit_phy)(struct e1000_hw *);
31870 - s32 (*force_speed_duplex)(struct e1000_hw *);
31871 - s32 (*get_cfg_done)(struct e1000_hw *hw);
31872 - s32 (*get_cable_length)(struct e1000_hw *);
31873 - s32 (*get_phy_info)(struct e1000_hw *);
31874 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31875 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31876 - void (*release_phy)(struct e1000_hw *);
31877 - s32 (*reset_phy)(struct e1000_hw *);
31878 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31879 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31880 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31881 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31882 - s32 (*cfg_on_link_up)(struct e1000_hw *);
31883 + s32 (* acquire_phy)(struct e1000_hw *);
31884 + s32 (* check_polarity)(struct e1000_hw *);
31885 + s32 (* check_reset_block)(struct e1000_hw *);
31886 + s32 (* commit_phy)(struct e1000_hw *);
31887 + s32 (* force_speed_duplex)(struct e1000_hw *);
31888 + s32 (* get_cfg_done)(struct e1000_hw *hw);
31889 + s32 (* get_cable_length)(struct e1000_hw *);
31890 + s32 (* get_phy_info)(struct e1000_hw *);
31891 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31892 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31893 + void (* release_phy)(struct e1000_hw *);
31894 + s32 (* reset_phy)(struct e1000_hw *);
31895 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
31896 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
31897 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
31898 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31899 + s32 (* cfg_on_link_up)(struct e1000_hw *);
31900 };
31901
31902 /* Function pointers for the NVM. */
31903 struct e1000_nvm_operations {
31904 - s32 (*acquire_nvm)(struct e1000_hw *);
31905 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31906 - void (*release_nvm)(struct e1000_hw *);
31907 - s32 (*update_nvm)(struct e1000_hw *);
31908 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
31909 - s32 (*validate_nvm)(struct e1000_hw *);
31910 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31911 + s32 (* const acquire_nvm)(struct e1000_hw *);
31912 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31913 + void (* const release_nvm)(struct e1000_hw *);
31914 + s32 (* const update_nvm)(struct e1000_hw *);
31915 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
31916 + s32 (* const validate_nvm)(struct e1000_hw *);
31917 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31918 };
31919
31920 struct e1000_mac_info {
31921 diff -urNp linux-2.6.32.41/drivers/net/e1000e/ich8lan.c linux-2.6.32.41/drivers/net/e1000e/ich8lan.c
31922 --- linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
31923 +++ linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
31924 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
31925 }
31926 }
31927
31928 -static struct e1000_mac_operations ich8_mac_ops = {
31929 +static const struct e1000_mac_operations ich8_mac_ops = {
31930 .id_led_init = e1000e_id_led_init,
31931 .check_mng_mode = e1000_check_mng_mode_ich8lan,
31932 .check_for_link = e1000_check_for_copper_link_ich8lan,
31933 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
31934 /* id_led_init dependent on mac type */
31935 };
31936
31937 -static struct e1000_phy_operations ich8_phy_ops = {
31938 +static const struct e1000_phy_operations ich8_phy_ops = {
31939 .acquire_phy = e1000_acquire_swflag_ich8lan,
31940 .check_reset_block = e1000_check_reset_block_ich8lan,
31941 .commit_phy = NULL,
31942 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
31943 .write_phy_reg = e1000e_write_phy_reg_igp,
31944 };
31945
31946 -static struct e1000_nvm_operations ich8_nvm_ops = {
31947 +static const struct e1000_nvm_operations ich8_nvm_ops = {
31948 .acquire_nvm = e1000_acquire_nvm_ich8lan,
31949 .read_nvm = e1000_read_nvm_ich8lan,
31950 .release_nvm = e1000_release_nvm_ich8lan,
31951 diff -urNp linux-2.6.32.41/drivers/net/hamradio/6pack.c linux-2.6.32.41/drivers/net/hamradio/6pack.c
31952 --- linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
31953 +++ linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
31954 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
31955 unsigned char buf[512];
31956 int count1;
31957
31958 + pax_track_stack();
31959 +
31960 if (!count)
31961 return;
31962
31963 diff -urNp linux-2.6.32.41/drivers/net/ibmveth.c linux-2.6.32.41/drivers/net/ibmveth.c
31964 --- linux-2.6.32.41/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
31965 +++ linux-2.6.32.41/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
31966 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
31967 NULL,
31968 };
31969
31970 -static struct sysfs_ops veth_pool_ops = {
31971 +static const struct sysfs_ops veth_pool_ops = {
31972 .show = veth_pool_show,
31973 .store = veth_pool_store,
31974 };
31975 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_82575.c linux-2.6.32.41/drivers/net/igb/e1000_82575.c
31976 --- linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
31977 +++ linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
31978 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
31979 wr32(E1000_VT_CTL, vt_ctl);
31980 }
31981
31982 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
31983 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
31984 .reset_hw = igb_reset_hw_82575,
31985 .init_hw = igb_init_hw_82575,
31986 .check_for_link = igb_check_for_link_82575,
31987 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
31988 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
31989 };
31990
31991 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
31992 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
31993 .acquire = igb_acquire_phy_82575,
31994 .get_cfg_done = igb_get_cfg_done_82575,
31995 .release = igb_release_phy_82575,
31996 };
31997
31998 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31999 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32000 .acquire = igb_acquire_nvm_82575,
32001 .read = igb_read_nvm_eerd,
32002 .release = igb_release_nvm_82575,
32003 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_hw.h linux-2.6.32.41/drivers/net/igb/e1000_hw.h
32004 --- linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32005 +++ linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32006 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32007 };
32008
32009 struct e1000_nvm_operations {
32010 - s32 (*acquire)(struct e1000_hw *);
32011 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32012 - void (*release)(struct e1000_hw *);
32013 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32014 + s32 (* const acquire)(struct e1000_hw *);
32015 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32016 + void (* const release)(struct e1000_hw *);
32017 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32018 };
32019
32020 struct e1000_info {
32021 s32 (*get_invariants)(struct e1000_hw *);
32022 - struct e1000_mac_operations *mac_ops;
32023 - struct e1000_phy_operations *phy_ops;
32024 - struct e1000_nvm_operations *nvm_ops;
32025 + const struct e1000_mac_operations *mac_ops;
32026 + const struct e1000_phy_operations *phy_ops;
32027 + const struct e1000_nvm_operations *nvm_ops;
32028 };
32029
32030 extern const struct e1000_info e1000_82575_info;
32031 diff -urNp linux-2.6.32.41/drivers/net/iseries_veth.c linux-2.6.32.41/drivers/net/iseries_veth.c
32032 --- linux-2.6.32.41/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32033 +++ linux-2.6.32.41/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32034 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32035 NULL
32036 };
32037
32038 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32039 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32040 .show = veth_cnx_attribute_show
32041 };
32042
32043 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32044 NULL
32045 };
32046
32047 -static struct sysfs_ops veth_port_sysfs_ops = {
32048 +static const struct sysfs_ops veth_port_sysfs_ops = {
32049 .show = veth_port_attribute_show
32050 };
32051
32052 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c
32053 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32054 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32055 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32056 u32 rctl;
32057 int i;
32058
32059 + pax_track_stack();
32060 +
32061 /* Check for Promiscuous and All Multicast modes */
32062
32063 rctl = IXGB_READ_REG(hw, RCTL);
32064 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c
32065 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32066 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32067 @@ -260,6 +260,9 @@ void __devinit
32068 ixgb_check_options(struct ixgb_adapter *adapter)
32069 {
32070 int bd = adapter->bd_number;
32071 +
32072 + pax_track_stack();
32073 +
32074 if (bd >= IXGB_MAX_NIC) {
32075 printk(KERN_NOTICE
32076 "Warning: no configuration for board #%i\n", bd);
32077 diff -urNp linux-2.6.32.41/drivers/net/mlx4/main.c linux-2.6.32.41/drivers/net/mlx4/main.c
32078 --- linux-2.6.32.41/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32079 +++ linux-2.6.32.41/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32080 @@ -38,6 +38,7 @@
32081 #include <linux/errno.h>
32082 #include <linux/pci.h>
32083 #include <linux/dma-mapping.h>
32084 +#include <linux/sched.h>
32085
32086 #include <linux/mlx4/device.h>
32087 #include <linux/mlx4/doorbell.h>
32088 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32089 u64 icm_size;
32090 int err;
32091
32092 + pax_track_stack();
32093 +
32094 err = mlx4_QUERY_FW(dev);
32095 if (err) {
32096 if (err == -EACCES)
32097 diff -urNp linux-2.6.32.41/drivers/net/niu.c linux-2.6.32.41/drivers/net/niu.c
32098 --- linux-2.6.32.41/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32099 +++ linux-2.6.32.41/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32100 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32101 int i, num_irqs, err;
32102 u8 first_ldg;
32103
32104 + pax_track_stack();
32105 +
32106 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32107 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32108 ldg_num_map[i] = first_ldg + i;
32109 diff -urNp linux-2.6.32.41/drivers/net/pcnet32.c linux-2.6.32.41/drivers/net/pcnet32.c
32110 --- linux-2.6.32.41/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32111 +++ linux-2.6.32.41/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32112 @@ -79,7 +79,7 @@ static int cards_found;
32113 /*
32114 * VLB I/O addresses
32115 */
32116 -static unsigned int pcnet32_portlist[] __initdata =
32117 +static unsigned int pcnet32_portlist[] __devinitdata =
32118 { 0x300, 0x320, 0x340, 0x360, 0 };
32119
32120 static int pcnet32_debug = 0;
32121 diff -urNp linux-2.6.32.41/drivers/net/tg3.h linux-2.6.32.41/drivers/net/tg3.h
32122 --- linux-2.6.32.41/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32123 +++ linux-2.6.32.41/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32124 @@ -95,6 +95,7 @@
32125 #define CHIPREV_ID_5750_A0 0x4000
32126 #define CHIPREV_ID_5750_A1 0x4001
32127 #define CHIPREV_ID_5750_A3 0x4003
32128 +#define CHIPREV_ID_5750_C1 0x4201
32129 #define CHIPREV_ID_5750_C2 0x4202
32130 #define CHIPREV_ID_5752_A0_HW 0x5000
32131 #define CHIPREV_ID_5752_A0 0x6000
32132 diff -urNp linux-2.6.32.41/drivers/net/tulip/de2104x.c linux-2.6.32.41/drivers/net/tulip/de2104x.c
32133 --- linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32134 +++ linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32135 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32136 struct de_srom_info_leaf *il;
32137 void *bufp;
32138
32139 + pax_track_stack();
32140 +
32141 /* download entire eeprom */
32142 for (i = 0; i < DE_EEPROM_WORDS; i++)
32143 ((__le16 *)ee_data)[i] =
32144 diff -urNp linux-2.6.32.41/drivers/net/tulip/de4x5.c linux-2.6.32.41/drivers/net/tulip/de4x5.c
32145 --- linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32146 +++ linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32147 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32148 for (i=0; i<ETH_ALEN; i++) {
32149 tmp.addr[i] = dev->dev_addr[i];
32150 }
32151 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32152 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32153 break;
32154
32155 case DE4X5_SET_HWADDR: /* Set the hardware address */
32156 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32157 spin_lock_irqsave(&lp->lock, flags);
32158 memcpy(&statbuf, &lp->pktStats, ioc->len);
32159 spin_unlock_irqrestore(&lp->lock, flags);
32160 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32161 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32162 return -EFAULT;
32163 break;
32164 }
32165 diff -urNp linux-2.6.32.41/drivers/net/usb/hso.c linux-2.6.32.41/drivers/net/usb/hso.c
32166 --- linux-2.6.32.41/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32167 +++ linux-2.6.32.41/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32168 @@ -71,7 +71,7 @@
32169 #include <asm/byteorder.h>
32170 #include <linux/serial_core.h>
32171 #include <linux/serial.h>
32172 -
32173 +#include <asm/local.h>
32174
32175 #define DRIVER_VERSION "1.2"
32176 #define MOD_AUTHOR "Option Wireless"
32177 @@ -258,7 +258,7 @@ struct hso_serial {
32178
32179 /* from usb_serial_port */
32180 struct tty_struct *tty;
32181 - int open_count;
32182 + local_t open_count;
32183 spinlock_t serial_lock;
32184
32185 int (*write_data) (struct hso_serial *serial);
32186 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32187 struct urb *urb;
32188
32189 urb = serial->rx_urb[0];
32190 - if (serial->open_count > 0) {
32191 + if (local_read(&serial->open_count) > 0) {
32192 count = put_rxbuf_data(urb, serial);
32193 if (count == -1)
32194 return;
32195 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32196 DUMP1(urb->transfer_buffer, urb->actual_length);
32197
32198 /* Anyone listening? */
32199 - if (serial->open_count == 0)
32200 + if (local_read(&serial->open_count) == 0)
32201 return;
32202
32203 if (status == 0) {
32204 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32205 spin_unlock_irq(&serial->serial_lock);
32206
32207 /* check for port already opened, if not set the termios */
32208 - serial->open_count++;
32209 - if (serial->open_count == 1) {
32210 + if (local_inc_return(&serial->open_count) == 1) {
32211 tty->low_latency = 1;
32212 serial->rx_state = RX_IDLE;
32213 /* Force default termio settings */
32214 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32215 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32216 if (result) {
32217 hso_stop_serial_device(serial->parent);
32218 - serial->open_count--;
32219 + local_dec(&serial->open_count);
32220 kref_put(&serial->parent->ref, hso_serial_ref_free);
32221 }
32222 } else {
32223 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32224
32225 /* reset the rts and dtr */
32226 /* do the actual close */
32227 - serial->open_count--;
32228 + local_dec(&serial->open_count);
32229
32230 - if (serial->open_count <= 0) {
32231 - serial->open_count = 0;
32232 + if (local_read(&serial->open_count) <= 0) {
32233 + local_set(&serial->open_count, 0);
32234 spin_lock_irq(&serial->serial_lock);
32235 if (serial->tty == tty) {
32236 serial->tty->driver_data = NULL;
32237 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32238
32239 /* the actual setup */
32240 spin_lock_irqsave(&serial->serial_lock, flags);
32241 - if (serial->open_count)
32242 + if (local_read(&serial->open_count))
32243 _hso_serial_set_termios(tty, old);
32244 else
32245 tty->termios = old;
32246 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32247 /* Start all serial ports */
32248 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32249 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32250 - if (dev2ser(serial_table[i])->open_count) {
32251 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32252 result =
32253 hso_start_serial_device(serial_table[i], GFP_NOIO);
32254 hso_kick_transmit(dev2ser(serial_table[i]));
32255 diff -urNp linux-2.6.32.41/drivers/net/vxge/vxge-main.c linux-2.6.32.41/drivers/net/vxge/vxge-main.c
32256 --- linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32257 +++ linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32258 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32259 struct sk_buff *completed[NR_SKB_COMPLETED];
32260 int more;
32261
32262 + pax_track_stack();
32263 +
32264 do {
32265 more = 0;
32266 skb_ptr = completed;
32267 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32268 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32269 int index;
32270
32271 + pax_track_stack();
32272 +
32273 /*
32274 * Filling
32275 * - itable with bucket numbers
32276 diff -urNp linux-2.6.32.41/drivers/net/wan/cycx_x25.c linux-2.6.32.41/drivers/net/wan/cycx_x25.c
32277 --- linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32278 +++ linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32279 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32280 unsigned char hex[1024],
32281 * phex = hex;
32282
32283 + pax_track_stack();
32284 +
32285 if (len >= (sizeof(hex) / 2))
32286 len = (sizeof(hex) / 2) - 1;
32287
32288 diff -urNp linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c
32289 --- linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32290 +++ linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32291 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32292 int do_autopm = 1;
32293 DECLARE_COMPLETION_ONSTACK(notif_completion);
32294
32295 + pax_track_stack();
32296 +
32297 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32298 i2400m, ack, ack_size);
32299 BUG_ON(_ack == i2400m->bm_ack_buf);
32300 diff -urNp linux-2.6.32.41/drivers/net/wireless/airo.c linux-2.6.32.41/drivers/net/wireless/airo.c
32301 --- linux-2.6.32.41/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32302 +++ linux-2.6.32.41/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32303 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32304 BSSListElement * loop_net;
32305 BSSListElement * tmp_net;
32306
32307 + pax_track_stack();
32308 +
32309 /* Blow away current list of scan results */
32310 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32311 list_move_tail (&loop_net->list, &ai->network_free_list);
32312 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32313 WepKeyRid wkr;
32314 int rc;
32315
32316 + pax_track_stack();
32317 +
32318 memset( &mySsid, 0, sizeof( mySsid ) );
32319 kfree (ai->flash);
32320 ai->flash = NULL;
32321 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32322 __le32 *vals = stats.vals;
32323 int len;
32324
32325 + pax_track_stack();
32326 +
32327 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32328 return -ENOMEM;
32329 data = (struct proc_data *)file->private_data;
32330 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32331 /* If doLoseSync is not 1, we won't do a Lose Sync */
32332 int doLoseSync = -1;
32333
32334 + pax_track_stack();
32335 +
32336 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32337 return -ENOMEM;
32338 data = (struct proc_data *)file->private_data;
32339 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32340 int i;
32341 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32342
32343 + pax_track_stack();
32344 +
32345 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32346 if (!qual)
32347 return -ENOMEM;
32348 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32349 CapabilityRid cap_rid;
32350 __le32 *vals = stats_rid.vals;
32351
32352 + pax_track_stack();
32353 +
32354 /* Get stats out of the card */
32355 clear_bit(JOB_WSTATS, &local->jobs);
32356 if (local->power.event) {
32357 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c
32358 --- linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32359 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32360 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32361 unsigned int v;
32362 u64 tsf;
32363
32364 + pax_track_stack();
32365 +
32366 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32367 len += snprintf(buf+len, sizeof(buf)-len,
32368 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32369 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32370 unsigned int len = 0;
32371 unsigned int i;
32372
32373 + pax_track_stack();
32374 +
32375 len += snprintf(buf+len, sizeof(buf)-len,
32376 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32377
32378 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c
32379 --- linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32380 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32381 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32382 char buf[512];
32383 unsigned int len = 0;
32384
32385 + pax_track_stack();
32386 +
32387 len += snprintf(buf + len, sizeof(buf) - len,
32388 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32389 len += snprintf(buf + len, sizeof(buf) - len,
32390 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32391 int i;
32392 u8 addr[ETH_ALEN];
32393
32394 + pax_track_stack();
32395 +
32396 len += snprintf(buf + len, sizeof(buf) - len,
32397 "primary: %s (%s chan=%d ht=%d)\n",
32398 wiphy_name(sc->pri_wiphy->hw->wiphy),
32399 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c
32400 --- linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32401 +++ linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32402 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32403 struct b43_debugfs_fops {
32404 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32405 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32406 - struct file_operations fops;
32407 + const struct file_operations fops;
32408 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32409 size_t file_struct_offset;
32410 };
32411 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c
32412 --- linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32413 +++ linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32414 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32415 struct b43legacy_debugfs_fops {
32416 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32417 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32418 - struct file_operations fops;
32419 + const struct file_operations fops;
32420 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32421 size_t file_struct_offset;
32422 /* Take wl->irq_lock before calling read/write? */
32423 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c
32424 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32425 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32426 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32427 int err;
32428 DECLARE_SSID_BUF(ssid);
32429
32430 + pax_track_stack();
32431 +
32432 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32433
32434 if (ssid_len)
32435 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32436 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32437 int err;
32438
32439 + pax_track_stack();
32440 +
32441 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32442 idx, keylen, len);
32443
32444 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c
32445 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32446 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32447 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32448 unsigned long flags;
32449 DECLARE_SSID_BUF(ssid);
32450
32451 + pax_track_stack();
32452 +
32453 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32454 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32455 print_ssid(ssid, info_element->data, info_element->len),
32456 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c
32457 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32458 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32459 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32460 },
32461 };
32462
32463 -static struct iwl_ops iwl1000_ops = {
32464 +static const struct iwl_ops iwl1000_ops = {
32465 .ucode = &iwl5000_ucode,
32466 .lib = &iwl1000_lib,
32467 .hcmd = &iwl5000_hcmd,
32468 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c
32469 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32470 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32471 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32472 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32473 };
32474
32475 -static struct iwl_ops iwl3945_ops = {
32476 +static const struct iwl_ops iwl3945_ops = {
32477 .ucode = &iwl3945_ucode,
32478 .lib = &iwl3945_lib,
32479 .hcmd = &iwl3945_hcmd,
32480 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c
32481 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32482 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32483 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32484 },
32485 };
32486
32487 -static struct iwl_ops iwl4965_ops = {
32488 +static const struct iwl_ops iwl4965_ops = {
32489 .ucode = &iwl4965_ucode,
32490 .lib = &iwl4965_lib,
32491 .hcmd = &iwl4965_hcmd,
32492 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c
32493 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:01.000000000 -0400
32494 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:32.000000000 -0400
32495 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32496 },
32497 };
32498
32499 -struct iwl_ops iwl5000_ops = {
32500 +const struct iwl_ops iwl5000_ops = {
32501 .ucode = &iwl5000_ucode,
32502 .lib = &iwl5000_lib,
32503 .hcmd = &iwl5000_hcmd,
32504 .utils = &iwl5000_hcmd_utils,
32505 };
32506
32507 -static struct iwl_ops iwl5150_ops = {
32508 +static const struct iwl_ops iwl5150_ops = {
32509 .ucode = &iwl5000_ucode,
32510 .lib = &iwl5150_lib,
32511 .hcmd = &iwl5000_hcmd,
32512 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c
32513 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32514 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32515 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32516 .calc_rssi = iwl5000_calc_rssi,
32517 };
32518
32519 -static struct iwl_ops iwl6000_ops = {
32520 +static const struct iwl_ops iwl6000_ops = {
32521 .ucode = &iwl5000_ucode,
32522 .lib = &iwl6000_lib,
32523 .hcmd = &iwl5000_hcmd,
32524 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32525 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32526 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32527 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32528 u8 active_index = 0;
32529 s32 tpt = 0;
32530
32531 + pax_track_stack();
32532 +
32533 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32534
32535 if (!ieee80211_is_data(hdr->frame_control) ||
32536 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32537 u8 valid_tx_ant = 0;
32538 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32539
32540 + pax_track_stack();
32541 +
32542 /* Override starting rate (index 0) if needed for debug purposes */
32543 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32544
32545 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32546 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32547 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32548 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32549 int pos = 0;
32550 const size_t bufsz = sizeof(buf);
32551
32552 + pax_track_stack();
32553 +
32554 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32555 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32556 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32557 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32558 const size_t bufsz = sizeof(buf);
32559 ssize_t ret;
32560
32561 + pax_track_stack();
32562 +
32563 for (i = 0; i < AC_NUM; i++) {
32564 pos += scnprintf(buf + pos, bufsz - pos,
32565 "\tcw_min\tcw_max\taifsn\ttxop\n");
32566 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h
32567 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32568 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32569 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32570 #endif
32571
32572 #else
32573 -#define IWL_DEBUG(__priv, level, fmt, args...)
32574 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32575 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32576 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32577 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32578 void *p, u32 len)
32579 {}
32580 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h
32581 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32582 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32583 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32584
32585 /* shared structures from iwl-5000.c */
32586 extern struct iwl_mod_params iwl50_mod_params;
32587 -extern struct iwl_ops iwl5000_ops;
32588 +extern const struct iwl_ops iwl5000_ops;
32589 extern struct iwl_ucode_ops iwl5000_ucode;
32590 extern struct iwl_lib_ops iwl5000_lib;
32591 extern struct iwl_hcmd_ops iwl5000_hcmd;
32592 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c
32593 --- linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32594 +++ linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32595 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32596 int buf_len = 512;
32597 size_t len = 0;
32598
32599 + pax_track_stack();
32600 +
32601 if (*ppos != 0)
32602 return 0;
32603 if (count < sizeof(buf))
32604 diff -urNp linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c
32605 --- linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32606 +++ linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32607 @@ -708,7 +708,7 @@ out_unlock:
32608 struct lbs_debugfs_files {
32609 const char *name;
32610 int perm;
32611 - struct file_operations fops;
32612 + const struct file_operations fops;
32613 };
32614
32615 static const struct lbs_debugfs_files debugfs_files[] = {
32616 diff -urNp linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c
32617 --- linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32618 +++ linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32619 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32620
32621 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32622
32623 - if (rts_threshold < 0 || rts_threshold > 2347)
32624 + if (rts_threshold > 2347)
32625 rts_threshold = 2347;
32626
32627 tmp = cpu_to_le32(rts_threshold);
32628 diff -urNp linux-2.6.32.41/drivers/oprofile/buffer_sync.c linux-2.6.32.41/drivers/oprofile/buffer_sync.c
32629 --- linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32630 +++ linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32631 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32632 if (cookie == NO_COOKIE)
32633 offset = pc;
32634 if (cookie == INVALID_COOKIE) {
32635 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32636 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32637 offset = pc;
32638 }
32639 if (cookie != last_cookie) {
32640 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32641 /* add userspace sample */
32642
32643 if (!mm) {
32644 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32645 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32646 return 0;
32647 }
32648
32649 cookie = lookup_dcookie(mm, s->eip, &offset);
32650
32651 if (cookie == INVALID_COOKIE) {
32652 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32653 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32654 return 0;
32655 }
32656
32657 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32658 /* ignore backtraces if failed to add a sample */
32659 if (state == sb_bt_start) {
32660 state = sb_bt_ignore;
32661 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32662 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32663 }
32664 }
32665 release_mm(mm);
32666 diff -urNp linux-2.6.32.41/drivers/oprofile/event_buffer.c linux-2.6.32.41/drivers/oprofile/event_buffer.c
32667 --- linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32668 +++ linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32669 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32670 }
32671
32672 if (buffer_pos == buffer_size) {
32673 - atomic_inc(&oprofile_stats.event_lost_overflow);
32674 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32675 return;
32676 }
32677
32678 diff -urNp linux-2.6.32.41/drivers/oprofile/oprof.c linux-2.6.32.41/drivers/oprofile/oprof.c
32679 --- linux-2.6.32.41/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32680 +++ linux-2.6.32.41/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32681 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32682 if (oprofile_ops.switch_events())
32683 return;
32684
32685 - atomic_inc(&oprofile_stats.multiplex_counter);
32686 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32687 start_switch_worker();
32688 }
32689
32690 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofilefs.c linux-2.6.32.41/drivers/oprofile/oprofilefs.c
32691 --- linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32692 +++ linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32693 @@ -187,7 +187,7 @@ static const struct file_operations atom
32694
32695
32696 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32697 - char const *name, atomic_t *val)
32698 + char const *name, atomic_unchecked_t *val)
32699 {
32700 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32701 &atomic_ro_fops, 0444);
32702 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.c linux-2.6.32.41/drivers/oprofile/oprofile_stats.c
32703 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32704 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32705 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32706 cpu_buf->sample_invalid_eip = 0;
32707 }
32708
32709 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32710 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32711 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32712 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32713 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32714 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32715 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32716 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32717 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32718 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32719 }
32720
32721
32722 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.h linux-2.6.32.41/drivers/oprofile/oprofile_stats.h
32723 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32724 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32725 @@ -13,11 +13,11 @@
32726 #include <asm/atomic.h>
32727
32728 struct oprofile_stat_struct {
32729 - atomic_t sample_lost_no_mm;
32730 - atomic_t sample_lost_no_mapping;
32731 - atomic_t bt_lost_no_mapping;
32732 - atomic_t event_lost_overflow;
32733 - atomic_t multiplex_counter;
32734 + atomic_unchecked_t sample_lost_no_mm;
32735 + atomic_unchecked_t sample_lost_no_mapping;
32736 + atomic_unchecked_t bt_lost_no_mapping;
32737 + atomic_unchecked_t event_lost_overflow;
32738 + atomic_unchecked_t multiplex_counter;
32739 };
32740
32741 extern struct oprofile_stat_struct oprofile_stats;
32742 diff -urNp linux-2.6.32.41/drivers/parisc/pdc_stable.c linux-2.6.32.41/drivers/parisc/pdc_stable.c
32743 --- linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32744 +++ linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32745 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32746 return ret;
32747 }
32748
32749 -static struct sysfs_ops pdcspath_attr_ops = {
32750 +static const struct sysfs_ops pdcspath_attr_ops = {
32751 .show = pdcspath_attr_show,
32752 .store = pdcspath_attr_store,
32753 };
32754 diff -urNp linux-2.6.32.41/drivers/parport/procfs.c linux-2.6.32.41/drivers/parport/procfs.c
32755 --- linux-2.6.32.41/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32756 +++ linux-2.6.32.41/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32757 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32758
32759 *ppos += len;
32760
32761 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32762 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32763 }
32764
32765 #ifdef CONFIG_PARPORT_1284
32766 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32767
32768 *ppos += len;
32769
32770 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32771 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32772 }
32773 #endif /* IEEE1284.3 support. */
32774
32775 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c
32776 --- linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32777 +++ linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32778 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32779 }
32780
32781
32782 -static struct acpi_dock_ops acpiphp_dock_ops = {
32783 +static const struct acpi_dock_ops acpiphp_dock_ops = {
32784 .handler = handle_hotplug_event_func,
32785 };
32786
32787 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c
32788 --- linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32789 +++ linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32790 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32791
32792 void compaq_nvram_init (void __iomem *rom_start)
32793 {
32794 +
32795 +#ifndef CONFIG_PAX_KERNEXEC
32796 if (rom_start) {
32797 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32798 }
32799 +#endif
32800 +
32801 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32802
32803 /* initialize our int15 lock */
32804 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/fakephp.c linux-2.6.32.41/drivers/pci/hotplug/fakephp.c
32805 --- linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32806 +++ linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32807 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32808 }
32809
32810 static struct kobj_type legacy_ktype = {
32811 - .sysfs_ops = &(struct sysfs_ops){
32812 + .sysfs_ops = &(const struct sysfs_ops){
32813 .store = legacy_store, .show = legacy_show
32814 },
32815 .release = &legacy_release,
32816 diff -urNp linux-2.6.32.41/drivers/pci/intel-iommu.c linux-2.6.32.41/drivers/pci/intel-iommu.c
32817 --- linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32818 +++ linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32819 @@ -2643,7 +2643,7 @@ error:
32820 return 0;
32821 }
32822
32823 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32824 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
32825 unsigned long offset, size_t size,
32826 enum dma_data_direction dir,
32827 struct dma_attrs *attrs)
32828 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32829 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32830 }
32831
32832 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32833 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32834 size_t size, enum dma_data_direction dir,
32835 struct dma_attrs *attrs)
32836 {
32837 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32838 }
32839 }
32840
32841 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32842 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
32843 dma_addr_t *dma_handle, gfp_t flags)
32844 {
32845 void *vaddr;
32846 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32847 return NULL;
32848 }
32849
32850 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32851 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32852 dma_addr_t dma_handle)
32853 {
32854 int order;
32855 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32856 free_pages((unsigned long)vaddr, order);
32857 }
32858
32859 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32860 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32861 int nelems, enum dma_data_direction dir,
32862 struct dma_attrs *attrs)
32863 {
32864 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32865 return nelems;
32866 }
32867
32868 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32869 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32870 enum dma_data_direction dir, struct dma_attrs *attrs)
32871 {
32872 int i;
32873 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32874 return nelems;
32875 }
32876
32877 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32878 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32879 {
32880 return !dma_addr;
32881 }
32882
32883 -struct dma_map_ops intel_dma_ops = {
32884 +const struct dma_map_ops intel_dma_ops = {
32885 .alloc_coherent = intel_alloc_coherent,
32886 .free_coherent = intel_free_coherent,
32887 .map_sg = intel_map_sg,
32888 diff -urNp linux-2.6.32.41/drivers/pci/pcie/aspm.c linux-2.6.32.41/drivers/pci/pcie/aspm.c
32889 --- linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32890 +++ linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32891 @@ -27,9 +27,9 @@
32892 #define MODULE_PARAM_PREFIX "pcie_aspm."
32893
32894 /* Note: those are not register definitions */
32895 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32896 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32897 -#define ASPM_STATE_L1 (4) /* L1 state */
32898 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32899 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32900 +#define ASPM_STATE_L1 (4U) /* L1 state */
32901 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32902 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32903
32904 diff -urNp linux-2.6.32.41/drivers/pci/probe.c linux-2.6.32.41/drivers/pci/probe.c
32905 --- linux-2.6.32.41/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
32906 +++ linux-2.6.32.41/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
32907 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
32908 return ret;
32909 }
32910
32911 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
32912 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
32913 struct device_attribute *attr,
32914 char *buf)
32915 {
32916 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
32917 }
32918
32919 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
32920 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
32921 struct device_attribute *attr,
32922 char *buf)
32923 {
32924 diff -urNp linux-2.6.32.41/drivers/pci/proc.c linux-2.6.32.41/drivers/pci/proc.c
32925 --- linux-2.6.32.41/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
32926 +++ linux-2.6.32.41/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
32927 @@ -480,7 +480,16 @@ static const struct file_operations proc
32928 static int __init pci_proc_init(void)
32929 {
32930 struct pci_dev *dev = NULL;
32931 +
32932 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32933 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32934 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32935 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32936 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32937 +#endif
32938 +#else
32939 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32940 +#endif
32941 proc_create("devices", 0, proc_bus_pci_dir,
32942 &proc_bus_pci_dev_operations);
32943 proc_initialized = 1;
32944 diff -urNp linux-2.6.32.41/drivers/pci/slot.c linux-2.6.32.41/drivers/pci/slot.c
32945 --- linux-2.6.32.41/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
32946 +++ linux-2.6.32.41/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
32947 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
32948 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
32949 }
32950
32951 -static struct sysfs_ops pci_slot_sysfs_ops = {
32952 +static const struct sysfs_ops pci_slot_sysfs_ops = {
32953 .show = pci_slot_attr_show,
32954 .store = pci_slot_attr_store,
32955 };
32956 diff -urNp linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c
32957 --- linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
32958 +++ linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
32959 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
32960 return -EFAULT;
32961 }
32962 }
32963 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32964 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32965 if (!buf)
32966 return -ENOMEM;
32967
32968 diff -urNp linux-2.6.32.41/drivers/platform/x86/acer-wmi.c linux-2.6.32.41/drivers/platform/x86/acer-wmi.c
32969 --- linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
32970 +++ linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
32971 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
32972 return 0;
32973 }
32974
32975 -static struct backlight_ops acer_bl_ops = {
32976 +static const struct backlight_ops acer_bl_ops = {
32977 .get_brightness = read_brightness,
32978 .update_status = update_bl_status,
32979 };
32980 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus_acpi.c linux-2.6.32.41/drivers/platform/x86/asus_acpi.c
32981 --- linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
32982 +++ linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
32983 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
32984 return 0;
32985 }
32986
32987 -static struct backlight_ops asus_backlight_data = {
32988 +static const struct backlight_ops asus_backlight_data = {
32989 .get_brightness = read_brightness,
32990 .update_status = set_brightness_status,
32991 };
32992 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus-laptop.c linux-2.6.32.41/drivers/platform/x86/asus-laptop.c
32993 --- linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
32994 +++ linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
32995 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
32996 */
32997 static int read_brightness(struct backlight_device *bd);
32998 static int update_bl_status(struct backlight_device *bd);
32999 -static struct backlight_ops asusbl_ops = {
33000 +static const struct backlight_ops asusbl_ops = {
33001 .get_brightness = read_brightness,
33002 .update_status = update_bl_status,
33003 };
33004 diff -urNp linux-2.6.32.41/drivers/platform/x86/compal-laptop.c linux-2.6.32.41/drivers/platform/x86/compal-laptop.c
33005 --- linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33006 +++ linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33007 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33008 return set_lcd_level(b->props.brightness);
33009 }
33010
33011 -static struct backlight_ops compalbl_ops = {
33012 +static const struct backlight_ops compalbl_ops = {
33013 .get_brightness = bl_get_brightness,
33014 .update_status = bl_update_status,
33015 };
33016 diff -urNp linux-2.6.32.41/drivers/platform/x86/dell-laptop.c linux-2.6.32.41/drivers/platform/x86/dell-laptop.c
33017 --- linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33018 +++ linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33019 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33020 return buffer.output[1];
33021 }
33022
33023 -static struct backlight_ops dell_ops = {
33024 +static const struct backlight_ops dell_ops = {
33025 .get_brightness = dell_get_intensity,
33026 .update_status = dell_send_intensity,
33027 };
33028 diff -urNp linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c
33029 --- linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33030 +++ linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33031 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33032 */
33033 static int read_brightness(struct backlight_device *bd);
33034 static int update_bl_status(struct backlight_device *bd);
33035 -static struct backlight_ops eeepcbl_ops = {
33036 +static const struct backlight_ops eeepcbl_ops = {
33037 .get_brightness = read_brightness,
33038 .update_status = update_bl_status,
33039 };
33040 diff -urNp linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c
33041 --- linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33042 +++ linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33043 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33044 return ret;
33045 }
33046
33047 -static struct backlight_ops fujitsubl_ops = {
33048 +static const struct backlight_ops fujitsubl_ops = {
33049 .get_brightness = bl_get_brightness,
33050 .update_status = bl_update_status,
33051 };
33052 diff -urNp linux-2.6.32.41/drivers/platform/x86/msi-laptop.c linux-2.6.32.41/drivers/platform/x86/msi-laptop.c
33053 --- linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33054 +++ linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33055 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33056 return set_lcd_level(b->props.brightness);
33057 }
33058
33059 -static struct backlight_ops msibl_ops = {
33060 +static const struct backlight_ops msibl_ops = {
33061 .get_brightness = bl_get_brightness,
33062 .update_status = bl_update_status,
33063 };
33064 diff -urNp linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c
33065 --- linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33066 +++ linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33067 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33068 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33069 }
33070
33071 -static struct backlight_ops pcc_backlight_ops = {
33072 +static const struct backlight_ops pcc_backlight_ops = {
33073 .get_brightness = bl_get,
33074 .update_status = bl_set_status,
33075 };
33076 diff -urNp linux-2.6.32.41/drivers/platform/x86/sony-laptop.c linux-2.6.32.41/drivers/platform/x86/sony-laptop.c
33077 --- linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33078 +++ linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33079 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33080 }
33081
33082 static struct backlight_device *sony_backlight_device;
33083 -static struct backlight_ops sony_backlight_ops = {
33084 +static const struct backlight_ops sony_backlight_ops = {
33085 .update_status = sony_backlight_update_status,
33086 .get_brightness = sony_backlight_get_brightness,
33087 };
33088 diff -urNp linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c
33089 --- linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33090 +++ linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33091 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33092 BACKLIGHT_UPDATE_HOTKEY);
33093 }
33094
33095 -static struct backlight_ops ibm_backlight_data = {
33096 +static const struct backlight_ops ibm_backlight_data = {
33097 .get_brightness = brightness_get,
33098 .update_status = brightness_update_status,
33099 };
33100 diff -urNp linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c
33101 --- linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33102 +++ linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33103 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33104 return AE_OK;
33105 }
33106
33107 -static struct backlight_ops toshiba_backlight_data = {
33108 +static const struct backlight_ops toshiba_backlight_data = {
33109 .get_brightness = get_lcd,
33110 .update_status = set_lcd_status,
33111 };
33112 diff -urNp linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c
33113 --- linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33114 +++ linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33115 @@ -60,7 +60,7 @@ do { \
33116 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33117 } while(0)
33118
33119 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33120 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33121 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33122
33123 /*
33124 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33125
33126 cpu = get_cpu();
33127 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33128 +
33129 + pax_open_kernel();
33130 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33131 + pax_close_kernel();
33132
33133 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33134 spin_lock_irqsave(&pnp_bios_lock, flags);
33135 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33136 :"memory");
33137 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33138
33139 + pax_open_kernel();
33140 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33141 + pax_close_kernel();
33142 +
33143 put_cpu();
33144
33145 /* If we get here and this is set then the PnP BIOS faulted on us. */
33146 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33147 return status;
33148 }
33149
33150 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33151 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33152 {
33153 int i;
33154
33155 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33156 pnp_bios_callpoint.offset = header->fields.pm16offset;
33157 pnp_bios_callpoint.segment = PNP_CS16;
33158
33159 + pax_open_kernel();
33160 +
33161 for_each_possible_cpu(i) {
33162 struct desc_struct *gdt = get_cpu_gdt_table(i);
33163 if (!gdt)
33164 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33165 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33166 (unsigned long)__va(header->fields.pm16dseg));
33167 }
33168 +
33169 + pax_close_kernel();
33170 }
33171 diff -urNp linux-2.6.32.41/drivers/pnp/resource.c linux-2.6.32.41/drivers/pnp/resource.c
33172 --- linux-2.6.32.41/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33173 +++ linux-2.6.32.41/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33174 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33175 return 1;
33176
33177 /* check if the resource is valid */
33178 - if (*irq < 0 || *irq > 15)
33179 + if (*irq > 15)
33180 return 0;
33181
33182 /* check if the resource is reserved */
33183 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33184 return 1;
33185
33186 /* check if the resource is valid */
33187 - if (*dma < 0 || *dma == 4 || *dma > 7)
33188 + if (*dma == 4 || *dma > 7)
33189 return 0;
33190
33191 /* check if the resource is reserved */
33192 diff -urNp linux-2.6.32.41/drivers/rtc/rtc-dev.c linux-2.6.32.41/drivers/rtc/rtc-dev.c
33193 --- linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33194 +++ linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33195 @@ -14,6 +14,7 @@
33196 #include <linux/module.h>
33197 #include <linux/rtc.h>
33198 #include <linux/sched.h>
33199 +#include <linux/grsecurity.h>
33200 #include "rtc-core.h"
33201
33202 static dev_t rtc_devt;
33203 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33204 if (copy_from_user(&tm, uarg, sizeof(tm)))
33205 return -EFAULT;
33206
33207 + gr_log_timechange();
33208 +
33209 return rtc_set_time(rtc, &tm);
33210
33211 case RTC_PIE_ON:
33212 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.c linux-2.6.32.41/drivers/s390/cio/qdio_perf.c
33213 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33214 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33215 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33216 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33217 {
33218 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33219 - (long)atomic_long_read(&perf_stats.qdio_int));
33220 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33221 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33222 - (long)atomic_long_read(&perf_stats.pci_int));
33223 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33224 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33225 - (long)atomic_long_read(&perf_stats.thin_int));
33226 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33227 seq_printf(m, "\n");
33228 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33229 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33230 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33231 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33232 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33233 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33234 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33235 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33236 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33237 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33238 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33239 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33240 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33241 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33242 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33243 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33244 seq_printf(m, "\n");
33245 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33246 - (long)atomic_long_read(&perf_stats.siga_in));
33247 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33248 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33249 - (long)atomic_long_read(&perf_stats.siga_out));
33250 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33251 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33252 - (long)atomic_long_read(&perf_stats.siga_sync));
33253 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33254 seq_printf(m, "\n");
33255 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33256 - (long)atomic_long_read(&perf_stats.inbound_handler));
33257 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33258 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33259 - (long)atomic_long_read(&perf_stats.outbound_handler));
33260 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33261 seq_printf(m, "\n");
33262 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33263 - (long)atomic_long_read(&perf_stats.fast_requeue));
33264 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33265 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33266 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33267 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33268 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33269 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33270 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33271 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33272 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33273 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33274 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33275 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33276 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33277 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33278 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33279 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33280 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33281 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33282 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33283 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33284 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33285 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33286 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33287 seq_printf(m, "\n");
33288 return 0;
33289 }
33290 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.h linux-2.6.32.41/drivers/s390/cio/qdio_perf.h
33291 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33292 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33293 @@ -13,46 +13,46 @@
33294
33295 struct qdio_perf_stats {
33296 /* interrupt handler calls */
33297 - atomic_long_t qdio_int;
33298 - atomic_long_t pci_int;
33299 - atomic_long_t thin_int;
33300 + atomic_long_unchecked_t qdio_int;
33301 + atomic_long_unchecked_t pci_int;
33302 + atomic_long_unchecked_t thin_int;
33303
33304 /* tasklet runs */
33305 - atomic_long_t tasklet_inbound;
33306 - atomic_long_t tasklet_outbound;
33307 - atomic_long_t tasklet_thinint;
33308 - atomic_long_t tasklet_thinint_loop;
33309 - atomic_long_t thinint_inbound;
33310 - atomic_long_t thinint_inbound_loop;
33311 - atomic_long_t thinint_inbound_loop2;
33312 + atomic_long_unchecked_t tasklet_inbound;
33313 + atomic_long_unchecked_t tasklet_outbound;
33314 + atomic_long_unchecked_t tasklet_thinint;
33315 + atomic_long_unchecked_t tasklet_thinint_loop;
33316 + atomic_long_unchecked_t thinint_inbound;
33317 + atomic_long_unchecked_t thinint_inbound_loop;
33318 + atomic_long_unchecked_t thinint_inbound_loop2;
33319
33320 /* signal adapter calls */
33321 - atomic_long_t siga_out;
33322 - atomic_long_t siga_in;
33323 - atomic_long_t siga_sync;
33324 + atomic_long_unchecked_t siga_out;
33325 + atomic_long_unchecked_t siga_in;
33326 + atomic_long_unchecked_t siga_sync;
33327
33328 /* misc */
33329 - atomic_long_t inbound_handler;
33330 - atomic_long_t outbound_handler;
33331 - atomic_long_t fast_requeue;
33332 - atomic_long_t outbound_target_full;
33333 + atomic_long_unchecked_t inbound_handler;
33334 + atomic_long_unchecked_t outbound_handler;
33335 + atomic_long_unchecked_t fast_requeue;
33336 + atomic_long_unchecked_t outbound_target_full;
33337
33338 /* for debugging */
33339 - atomic_long_t debug_tl_out_timer;
33340 - atomic_long_t debug_stop_polling;
33341 - atomic_long_t debug_eqbs_all;
33342 - atomic_long_t debug_eqbs_incomplete;
33343 - atomic_long_t debug_sqbs_all;
33344 - atomic_long_t debug_sqbs_incomplete;
33345 + atomic_long_unchecked_t debug_tl_out_timer;
33346 + atomic_long_unchecked_t debug_stop_polling;
33347 + atomic_long_unchecked_t debug_eqbs_all;
33348 + atomic_long_unchecked_t debug_eqbs_incomplete;
33349 + atomic_long_unchecked_t debug_sqbs_all;
33350 + atomic_long_unchecked_t debug_sqbs_incomplete;
33351 };
33352
33353 extern struct qdio_perf_stats perf_stats;
33354 extern int qdio_performance_stats;
33355
33356 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33357 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33358 {
33359 if (qdio_performance_stats)
33360 - atomic_long_inc(count);
33361 + atomic_long_inc_unchecked(count);
33362 }
33363
33364 int qdio_setup_perf_stats(void);
33365 diff -urNp linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c
33366 --- linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33367 +++ linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33368 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33369 u32 actual_fibsize64, actual_fibsize = 0;
33370 int i;
33371
33372 + pax_track_stack();
33373
33374 if (dev->in_reset) {
33375 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33376 diff -urNp linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c
33377 --- linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33378 +++ linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33379 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33380 flash_error_table[i].reason);
33381 }
33382
33383 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33384 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33385 asd_show_update_bios, asd_store_update_bios);
33386
33387 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33388 diff -urNp linux-2.6.32.41/drivers/scsi/BusLogic.c linux-2.6.32.41/drivers/scsi/BusLogic.c
33389 --- linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33390 +++ linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33391 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33392 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33393 *PrototypeHostAdapter)
33394 {
33395 + pax_track_stack();
33396 +
33397 /*
33398 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33399 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33400 diff -urNp linux-2.6.32.41/drivers/scsi/dpt_i2o.c linux-2.6.32.41/drivers/scsi/dpt_i2o.c
33401 --- linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33402 +++ linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33403 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33404 dma_addr_t addr;
33405 ulong flags = 0;
33406
33407 + pax_track_stack();
33408 +
33409 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33410 // get user msg size in u32s
33411 if(get_user(size, &user_msg[0])){
33412 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33413 s32 rcode;
33414 dma_addr_t addr;
33415
33416 + pax_track_stack();
33417 +
33418 memset(msg, 0 , sizeof(msg));
33419 len = scsi_bufflen(cmd);
33420 direction = 0x00000000;
33421 diff -urNp linux-2.6.32.41/drivers/scsi/eata.c linux-2.6.32.41/drivers/scsi/eata.c
33422 --- linux-2.6.32.41/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33423 +++ linux-2.6.32.41/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33424 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33425 struct hostdata *ha;
33426 char name[16];
33427
33428 + pax_track_stack();
33429 +
33430 sprintf(name, "%s%d", driver_name, j);
33431
33432 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33433 diff -urNp linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c
33434 --- linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33435 +++ linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33436 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33437 size_t rlen;
33438 size_t dlen;
33439
33440 + pax_track_stack();
33441 +
33442 fiph = (struct fip_header *)skb->data;
33443 sub = fiph->fip_subcode;
33444 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33445 diff -urNp linux-2.6.32.41/drivers/scsi/gdth.c linux-2.6.32.41/drivers/scsi/gdth.c
33446 --- linux-2.6.32.41/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33447 +++ linux-2.6.32.41/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33448 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33449 ulong flags;
33450 gdth_ha_str *ha;
33451
33452 + pax_track_stack();
33453 +
33454 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33455 return -EFAULT;
33456 ha = gdth_find_ha(ldrv.ionode);
33457 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33458 gdth_ha_str *ha;
33459 int rval;
33460
33461 + pax_track_stack();
33462 +
33463 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33464 res.number >= MAX_HDRIVES)
33465 return -EFAULT;
33466 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33467 gdth_ha_str *ha;
33468 int rval;
33469
33470 + pax_track_stack();
33471 +
33472 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33473 return -EFAULT;
33474 ha = gdth_find_ha(gen.ionode);
33475 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33476 int i;
33477 gdth_cmd_str gdtcmd;
33478 char cmnd[MAX_COMMAND_SIZE];
33479 +
33480 + pax_track_stack();
33481 +
33482 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33483
33484 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33485 diff -urNp linux-2.6.32.41/drivers/scsi/gdth_proc.c linux-2.6.32.41/drivers/scsi/gdth_proc.c
33486 --- linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33487 +++ linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33488 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33489 ulong64 paddr;
33490
33491 char cmnd[MAX_COMMAND_SIZE];
33492 +
33493 + pax_track_stack();
33494 +
33495 memset(cmnd, 0xff, 12);
33496 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33497
33498 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33499 gdth_hget_str *phg;
33500 char cmnd[MAX_COMMAND_SIZE];
33501
33502 + pax_track_stack();
33503 +
33504 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33505 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33506 if (!gdtcmd || !estr)
33507 diff -urNp linux-2.6.32.41/drivers/scsi/hosts.c linux-2.6.32.41/drivers/scsi/hosts.c
33508 --- linux-2.6.32.41/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33509 +++ linux-2.6.32.41/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33510 @@ -40,7 +40,7 @@
33511 #include "scsi_logging.h"
33512
33513
33514 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33515 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33516
33517
33518 static void scsi_host_cls_release(struct device *dev)
33519 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33520 * subtract one because we increment first then return, but we need to
33521 * know what the next host number was before increment
33522 */
33523 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33524 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33525 shost->dma_channel = 0xff;
33526
33527 /* These three are default values which can be overridden */
33528 diff -urNp linux-2.6.32.41/drivers/scsi/ipr.c linux-2.6.32.41/drivers/scsi/ipr.c
33529 --- linux-2.6.32.41/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33530 +++ linux-2.6.32.41/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33531 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33532 return true;
33533 }
33534
33535 -static struct ata_port_operations ipr_sata_ops = {
33536 +static const struct ata_port_operations ipr_sata_ops = {
33537 .phy_reset = ipr_ata_phy_reset,
33538 .hardreset = ipr_sata_reset,
33539 .post_internal_cmd = ipr_ata_post_internal,
33540 diff -urNp linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c
33541 --- linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33542 +++ linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33543 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33544 * all together if not used XXX
33545 */
33546 struct {
33547 - atomic_t no_free_exch;
33548 - atomic_t no_free_exch_xid;
33549 - atomic_t xid_not_found;
33550 - atomic_t xid_busy;
33551 - atomic_t seq_not_found;
33552 - atomic_t non_bls_resp;
33553 + atomic_unchecked_t no_free_exch;
33554 + atomic_unchecked_t no_free_exch_xid;
33555 + atomic_unchecked_t xid_not_found;
33556 + atomic_unchecked_t xid_busy;
33557 + atomic_unchecked_t seq_not_found;
33558 + atomic_unchecked_t non_bls_resp;
33559 } stats;
33560 };
33561 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33562 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33563 /* allocate memory for exchange */
33564 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33565 if (!ep) {
33566 - atomic_inc(&mp->stats.no_free_exch);
33567 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33568 goto out;
33569 }
33570 memset(ep, 0, sizeof(*ep));
33571 @@ -557,7 +557,7 @@ out:
33572 return ep;
33573 err:
33574 spin_unlock_bh(&pool->lock);
33575 - atomic_inc(&mp->stats.no_free_exch_xid);
33576 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33577 mempool_free(ep, mp->ep_pool);
33578 return NULL;
33579 }
33580 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33581 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33582 ep = fc_exch_find(mp, xid);
33583 if (!ep) {
33584 - atomic_inc(&mp->stats.xid_not_found);
33585 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33586 reject = FC_RJT_OX_ID;
33587 goto out;
33588 }
33589 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33590 ep = fc_exch_find(mp, xid);
33591 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33592 if (ep) {
33593 - atomic_inc(&mp->stats.xid_busy);
33594 + atomic_inc_unchecked(&mp->stats.xid_busy);
33595 reject = FC_RJT_RX_ID;
33596 goto rel;
33597 }
33598 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33599 }
33600 xid = ep->xid; /* get our XID */
33601 } else if (!ep) {
33602 - atomic_inc(&mp->stats.xid_not_found);
33603 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33604 reject = FC_RJT_RX_ID; /* XID not found */
33605 goto out;
33606 }
33607 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33608 } else {
33609 sp = &ep->seq;
33610 if (sp->id != fh->fh_seq_id) {
33611 - atomic_inc(&mp->stats.seq_not_found);
33612 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33613 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33614 goto rel;
33615 }
33616 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33617
33618 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33619 if (!ep) {
33620 - atomic_inc(&mp->stats.xid_not_found);
33621 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33622 goto out;
33623 }
33624 if (ep->esb_stat & ESB_ST_COMPLETE) {
33625 - atomic_inc(&mp->stats.xid_not_found);
33626 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33627 goto out;
33628 }
33629 if (ep->rxid == FC_XID_UNKNOWN)
33630 ep->rxid = ntohs(fh->fh_rx_id);
33631 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33632 - atomic_inc(&mp->stats.xid_not_found);
33633 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33634 goto rel;
33635 }
33636 if (ep->did != ntoh24(fh->fh_s_id) &&
33637 ep->did != FC_FID_FLOGI) {
33638 - atomic_inc(&mp->stats.xid_not_found);
33639 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33640 goto rel;
33641 }
33642 sof = fr_sof(fp);
33643 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33644 } else {
33645 sp = &ep->seq;
33646 if (sp->id != fh->fh_seq_id) {
33647 - atomic_inc(&mp->stats.seq_not_found);
33648 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33649 goto rel;
33650 }
33651 }
33652 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33653 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33654
33655 if (!sp)
33656 - atomic_inc(&mp->stats.xid_not_found);
33657 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33658 else
33659 - atomic_inc(&mp->stats.non_bls_resp);
33660 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33661
33662 fc_frame_free(fp);
33663 }
33664 diff -urNp linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c
33665 --- linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33666 +++ linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33667 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33668 }
33669 }
33670
33671 -static struct ata_port_operations sas_sata_ops = {
33672 +static const struct ata_port_operations sas_sata_ops = {
33673 .phy_reset = sas_ata_phy_reset,
33674 .post_internal_cmd = sas_ata_post_internal,
33675 .qc_defer = ata_std_qc_defer,
33676 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c
33677 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33678 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33679 @@ -124,7 +124,7 @@ struct lpfc_debug {
33680 int len;
33681 };
33682
33683 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33684 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33685 static unsigned long lpfc_debugfs_start_time = 0L;
33686
33687 /**
33688 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33689 lpfc_debugfs_enable = 0;
33690
33691 len = 0;
33692 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33693 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33694 (lpfc_debugfs_max_disc_trc - 1);
33695 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33696 dtp = vport->disc_trc + i;
33697 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33698 lpfc_debugfs_enable = 0;
33699
33700 len = 0;
33701 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33702 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33703 (lpfc_debugfs_max_slow_ring_trc - 1);
33704 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33705 dtp = phba->slow_ring_trc + i;
33706 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33707 uint32_t *ptr;
33708 char buffer[1024];
33709
33710 + pax_track_stack();
33711 +
33712 off = 0;
33713 spin_lock_irq(&phba->hbalock);
33714
33715 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33716 !vport || !vport->disc_trc)
33717 return;
33718
33719 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33720 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33721 (lpfc_debugfs_max_disc_trc - 1);
33722 dtp = vport->disc_trc + index;
33723 dtp->fmt = fmt;
33724 dtp->data1 = data1;
33725 dtp->data2 = data2;
33726 dtp->data3 = data3;
33727 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33728 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33729 dtp->jif = jiffies;
33730 #endif
33731 return;
33732 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33733 !phba || !phba->slow_ring_trc)
33734 return;
33735
33736 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33737 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33738 (lpfc_debugfs_max_slow_ring_trc - 1);
33739 dtp = phba->slow_ring_trc + index;
33740 dtp->fmt = fmt;
33741 dtp->data1 = data1;
33742 dtp->data2 = data2;
33743 dtp->data3 = data3;
33744 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33745 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33746 dtp->jif = jiffies;
33747 #endif
33748 return;
33749 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33750 "slow_ring buffer\n");
33751 goto debug_failed;
33752 }
33753 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33754 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33755 memset(phba->slow_ring_trc, 0,
33756 (sizeof(struct lpfc_debugfs_trc) *
33757 lpfc_debugfs_max_slow_ring_trc));
33758 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33759 "buffer\n");
33760 goto debug_failed;
33761 }
33762 - atomic_set(&vport->disc_trc_cnt, 0);
33763 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33764
33765 snprintf(name, sizeof(name), "discovery_trace");
33766 vport->debug_disc_trc =
33767 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h
33768 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33769 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33770 @@ -400,7 +400,7 @@ struct lpfc_vport {
33771 struct dentry *debug_nodelist;
33772 struct dentry *vport_debugfs_root;
33773 struct lpfc_debugfs_trc *disc_trc;
33774 - atomic_t disc_trc_cnt;
33775 + atomic_unchecked_t disc_trc_cnt;
33776 #endif
33777 uint8_t stat_data_enabled;
33778 uint8_t stat_data_blocked;
33779 @@ -725,8 +725,8 @@ struct lpfc_hba {
33780 struct timer_list fabric_block_timer;
33781 unsigned long bit_flags;
33782 #define FABRIC_COMANDS_BLOCKED 0
33783 - atomic_t num_rsrc_err;
33784 - atomic_t num_cmd_success;
33785 + atomic_unchecked_t num_rsrc_err;
33786 + atomic_unchecked_t num_cmd_success;
33787 unsigned long last_rsrc_error_time;
33788 unsigned long last_ramp_down_time;
33789 unsigned long last_ramp_up_time;
33790 @@ -740,7 +740,7 @@ struct lpfc_hba {
33791 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33792 struct dentry *debug_slow_ring_trc;
33793 struct lpfc_debugfs_trc *slow_ring_trc;
33794 - atomic_t slow_ring_trc_cnt;
33795 + atomic_unchecked_t slow_ring_trc_cnt;
33796 #endif
33797
33798 /* Used for deferred freeing of ELS data buffers */
33799 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c
33800 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33801 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33802 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33803 uint32_t evt_posted;
33804
33805 spin_lock_irqsave(&phba->hbalock, flags);
33806 - atomic_inc(&phba->num_rsrc_err);
33807 + atomic_inc_unchecked(&phba->num_rsrc_err);
33808 phba->last_rsrc_error_time = jiffies;
33809
33810 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33811 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33812 unsigned long flags;
33813 struct lpfc_hba *phba = vport->phba;
33814 uint32_t evt_posted;
33815 - atomic_inc(&phba->num_cmd_success);
33816 + atomic_inc_unchecked(&phba->num_cmd_success);
33817
33818 if (vport->cfg_lun_queue_depth <= queue_depth)
33819 return;
33820 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33821 int i;
33822 struct lpfc_rport_data *rdata;
33823
33824 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33825 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33826 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33827 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33828
33829 vports = lpfc_create_vport_work_array(phba);
33830 if (vports != NULL)
33831 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33832 }
33833 }
33834 lpfc_destroy_vport_work_array(phba, vports);
33835 - atomic_set(&phba->num_rsrc_err, 0);
33836 - atomic_set(&phba->num_cmd_success, 0);
33837 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33838 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33839 }
33840
33841 /**
33842 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33843 }
33844 }
33845 lpfc_destroy_vport_work_array(phba, vports);
33846 - atomic_set(&phba->num_rsrc_err, 0);
33847 - atomic_set(&phba->num_cmd_success, 0);
33848 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33849 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33850 }
33851
33852 /**
33853 diff -urNp linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c
33854 --- linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33855 +++ linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33856 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33857 int rval;
33858 int i;
33859
33860 + pax_track_stack();
33861 +
33862 // Allocate memory for the base list of scb for management module.
33863 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33864
33865 diff -urNp linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c
33866 --- linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33867 +++ linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33868 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33869 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33870 int ret;
33871
33872 + pax_track_stack();
33873 +
33874 or = osd_start_request(od, GFP_KERNEL);
33875 if (!or)
33876 return -ENOMEM;
33877 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.c linux-2.6.32.41/drivers/scsi/pmcraid.c
33878 --- linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33879 +++ linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33880 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33881 res->scsi_dev = scsi_dev;
33882 scsi_dev->hostdata = res;
33883 res->change_detected = 0;
33884 - atomic_set(&res->read_failures, 0);
33885 - atomic_set(&res->write_failures, 0);
33886 + atomic_set_unchecked(&res->read_failures, 0);
33887 + atomic_set_unchecked(&res->write_failures, 0);
33888 rc = 0;
33889 }
33890 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33891 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33892
33893 /* If this was a SCSI read/write command keep count of errors */
33894 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33895 - atomic_inc(&res->read_failures);
33896 + atomic_inc_unchecked(&res->read_failures);
33897 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33898 - atomic_inc(&res->write_failures);
33899 + atomic_inc_unchecked(&res->write_failures);
33900
33901 if (!RES_IS_GSCSI(res->cfg_entry) &&
33902 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33903 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
33904
33905 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33906 /* add resources only after host is added into system */
33907 - if (!atomic_read(&pinstance->expose_resources))
33908 + if (!atomic_read_unchecked(&pinstance->expose_resources))
33909 return;
33910
33911 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
33912 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
33913 init_waitqueue_head(&pinstance->reset_wait_q);
33914
33915 atomic_set(&pinstance->outstanding_cmds, 0);
33916 - atomic_set(&pinstance->expose_resources, 0);
33917 + atomic_set_unchecked(&pinstance->expose_resources, 0);
33918
33919 INIT_LIST_HEAD(&pinstance->free_res_q);
33920 INIT_LIST_HEAD(&pinstance->used_res_q);
33921 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
33922 /* Schedule worker thread to handle CCN and take care of adding and
33923 * removing devices to OS
33924 */
33925 - atomic_set(&pinstance->expose_resources, 1);
33926 + atomic_set_unchecked(&pinstance->expose_resources, 1);
33927 schedule_work(&pinstance->worker_q);
33928 return rc;
33929
33930 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.h linux-2.6.32.41/drivers/scsi/pmcraid.h
33931 --- linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
33932 +++ linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
33933 @@ -690,7 +690,7 @@ struct pmcraid_instance {
33934 atomic_t outstanding_cmds;
33935
33936 /* should add/delete resources to mid-layer now ?*/
33937 - atomic_t expose_resources;
33938 + atomic_unchecked_t expose_resources;
33939
33940 /* Tasklet to handle deferred processing */
33941 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
33942 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
33943 struct list_head queue; /* link to "to be exposed" resources */
33944 struct pmcraid_config_table_entry cfg_entry;
33945 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33946 - atomic_t read_failures; /* count of failed READ commands */
33947 - atomic_t write_failures; /* count of failed WRITE commands */
33948 + atomic_unchecked_t read_failures; /* count of failed READ commands */
33949 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33950
33951 /* To indicate add/delete/modify during CCN */
33952 u8 change_detected;
33953 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h
33954 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
33955 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
33956 @@ -240,7 +240,7 @@ struct ddb_entry {
33957 atomic_t retry_relogin_timer; /* Min Time between relogins
33958 * (4000 only) */
33959 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33960 - atomic_t relogin_retry_count; /* Num of times relogin has been
33961 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33962 * retried */
33963
33964 uint16_t port;
33965 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c
33966 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
33967 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
33968 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
33969 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
33970 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33971 atomic_set(&ddb_entry->relogin_timer, 0);
33972 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33973 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33974 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33975 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33976 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33977 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
33978 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33979 atomic_set(&ddb_entry->port_down_timer,
33980 ha->port_down_retry_count);
33981 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33982 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33983 atomic_set(&ddb_entry->relogin_timer, 0);
33984 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33985 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
33986 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c
33987 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
33988 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
33989 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
33990 ddb_entry->fw_ddb_device_state ==
33991 DDB_DS_SESSION_FAILED) {
33992 /* Reset retry relogin timer */
33993 - atomic_inc(&ddb_entry->relogin_retry_count);
33994 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33995 DEBUG2(printk("scsi%ld: index[%d] relogin"
33996 " timed out-retrying"
33997 " relogin (%d)\n",
33998 ha->host_no,
33999 ddb_entry->fw_ddb_index,
34000 - atomic_read(&ddb_entry->
34001 + atomic_read_unchecked(&ddb_entry->
34002 relogin_retry_count))
34003 );
34004 start_dpc++;
34005 diff -urNp linux-2.6.32.41/drivers/scsi/scsi.c linux-2.6.32.41/drivers/scsi/scsi.c
34006 --- linux-2.6.32.41/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34007 +++ linux-2.6.32.41/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34008 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34009 unsigned long timeout;
34010 int rtn = 0;
34011
34012 - atomic_inc(&cmd->device->iorequest_cnt);
34013 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34014
34015 /* check if the device is still usable */
34016 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34017 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_debug.c linux-2.6.32.41/drivers/scsi/scsi_debug.c
34018 --- linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34019 +++ linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34020 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34021 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34022 unsigned char *cmd = (unsigned char *)scp->cmnd;
34023
34024 + pax_track_stack();
34025 +
34026 if ((errsts = check_readiness(scp, 1, devip)))
34027 return errsts;
34028 memset(arr, 0, sizeof(arr));
34029 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34030 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34031 unsigned char *cmd = (unsigned char *)scp->cmnd;
34032
34033 + pax_track_stack();
34034 +
34035 if ((errsts = check_readiness(scp, 1, devip)))
34036 return errsts;
34037 memset(arr, 0, sizeof(arr));
34038 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_lib.c linux-2.6.32.41/drivers/scsi/scsi_lib.c
34039 --- linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34040 +++ linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34041 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34042
34043 scsi_init_cmd_errh(cmd);
34044 cmd->result = DID_NO_CONNECT << 16;
34045 - atomic_inc(&cmd->device->iorequest_cnt);
34046 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34047
34048 /*
34049 * SCSI request completion path will do scsi_device_unbusy(),
34050 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34051 */
34052 cmd->serial_number = 0;
34053
34054 - atomic_inc(&cmd->device->iodone_cnt);
34055 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34056 if (cmd->result)
34057 - atomic_inc(&cmd->device->ioerr_cnt);
34058 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34059
34060 disposition = scsi_decide_disposition(cmd);
34061 if (disposition != SUCCESS &&
34062 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_sysfs.c linux-2.6.32.41/drivers/scsi/scsi_sysfs.c
34063 --- linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:01.000000000 -0400
34064 +++ linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:33.000000000 -0400
34065 @@ -661,7 +661,7 @@ show_iostat_##field(struct device *dev,
34066 char *buf) \
34067 { \
34068 struct scsi_device *sdev = to_scsi_device(dev); \
34069 - unsigned long long count = atomic_read(&sdev->field); \
34070 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34071 return snprintf(buf, 20, "0x%llx\n", count); \
34072 } \
34073 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34074 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c
34075 --- linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34076 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34077 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34078 * Netlink Infrastructure
34079 */
34080
34081 -static atomic_t fc_event_seq;
34082 +static atomic_unchecked_t fc_event_seq;
34083
34084 /**
34085 * fc_get_event_number - Obtain the next sequential FC event number
34086 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34087 u32
34088 fc_get_event_number(void)
34089 {
34090 - return atomic_add_return(1, &fc_event_seq);
34091 + return atomic_add_return_unchecked(1, &fc_event_seq);
34092 }
34093 EXPORT_SYMBOL(fc_get_event_number);
34094
34095 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34096 {
34097 int error;
34098
34099 - atomic_set(&fc_event_seq, 0);
34100 + atomic_set_unchecked(&fc_event_seq, 0);
34101
34102 error = transport_class_register(&fc_host_class);
34103 if (error)
34104 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c
34105 --- linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34106 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34107 @@ -81,7 +81,7 @@ struct iscsi_internal {
34108 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34109 };
34110
34111 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34112 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34113 static struct workqueue_struct *iscsi_eh_timer_workq;
34114
34115 /*
34116 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34117 int err;
34118
34119 ihost = shost->shost_data;
34120 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34121 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34122
34123 if (id == ISCSI_MAX_TARGET) {
34124 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34125 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34126 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34127 ISCSI_TRANSPORT_VERSION);
34128
34129 - atomic_set(&iscsi_session_nr, 0);
34130 + atomic_set_unchecked(&iscsi_session_nr, 0);
34131
34132 err = class_register(&iscsi_transport_class);
34133 if (err)
34134 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c
34135 --- linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34136 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34137 @@ -33,7 +33,7 @@
34138 #include "scsi_transport_srp_internal.h"
34139
34140 struct srp_host_attrs {
34141 - atomic_t next_port_id;
34142 + atomic_unchecked_t next_port_id;
34143 };
34144 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34145
34146 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34147 struct Scsi_Host *shost = dev_to_shost(dev);
34148 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34149
34150 - atomic_set(&srp_host->next_port_id, 0);
34151 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34152 return 0;
34153 }
34154
34155 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34156 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34157 rport->roles = ids->roles;
34158
34159 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34160 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34161 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34162
34163 transport_setup_device(&rport->dev);
34164 diff -urNp linux-2.6.32.41/drivers/scsi/sg.c linux-2.6.32.41/drivers/scsi/sg.c
34165 --- linux-2.6.32.41/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34166 +++ linux-2.6.32.41/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34167 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34168 const struct file_operations * fops;
34169 };
34170
34171 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34172 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34173 {"allow_dio", &adio_fops},
34174 {"debug", &debug_fops},
34175 {"def_reserved_size", &dressz_fops},
34176 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34177 {
34178 int k, mask;
34179 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34180 - struct sg_proc_leaf * leaf;
34181 + const struct sg_proc_leaf * leaf;
34182
34183 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34184 if (!sg_proc_sgp)
34185 diff -urNp linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c
34186 --- linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34187 +++ linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34188 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34189 int do_iounmap = 0;
34190 int do_disable_device = 1;
34191
34192 + pax_track_stack();
34193 +
34194 memset(&sym_dev, 0, sizeof(sym_dev));
34195 memset(&nvram, 0, sizeof(nvram));
34196 sym_dev.pdev = pdev;
34197 diff -urNp linux-2.6.32.41/drivers/serial/kgdboc.c linux-2.6.32.41/drivers/serial/kgdboc.c
34198 --- linux-2.6.32.41/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34199 +++ linux-2.6.32.41/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34200 @@ -18,7 +18,7 @@
34201
34202 #define MAX_CONFIG_LEN 40
34203
34204 -static struct kgdb_io kgdboc_io_ops;
34205 +static const struct kgdb_io kgdboc_io_ops;
34206
34207 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34208 static int configured = -1;
34209 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34210 module_put(THIS_MODULE);
34211 }
34212
34213 -static struct kgdb_io kgdboc_io_ops = {
34214 +static const struct kgdb_io kgdboc_io_ops = {
34215 .name = "kgdboc",
34216 .read_char = kgdboc_get_char,
34217 .write_char = kgdboc_put_char,
34218 diff -urNp linux-2.6.32.41/drivers/spi/spi.c linux-2.6.32.41/drivers/spi/spi.c
34219 --- linux-2.6.32.41/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34220 +++ linux-2.6.32.41/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34221 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34222 EXPORT_SYMBOL_GPL(spi_sync);
34223
34224 /* portable code must never pass more than 32 bytes */
34225 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34226 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34227
34228 static u8 *buf;
34229
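For the 32 -> 32U change just above: switching the literal to unsigned makes SPI_BUFSIZ an unsigned quantity end to end, presumably so that comparing it against size_t transfer lengths does not mix signed and unsigned operands (the kernel's max() also type-checks its two arguments). A standalone sketch of the comparison this avoids, with hypothetical names:

    /* standalone sketch; DEMO_BUFSIZ stands in for SPI_BUFSIZ */
    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_BUFSIZ 32U          /* unsigned, as max(32U, SMP_CACHE_BYTES) now yields */

    int main(void)
    {
            size_t len = 100;        /* transfer lengths in spi.c are size_t */

            if (len > DEMO_BUFSIZ)   /* unsigned vs unsigned: no -Wsign-compare warning */
                    printf("need a bounce buffer larger than %u bytes\n", DEMO_BUFSIZ);
            return 0;
    }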
34230 diff -urNp linux-2.6.32.41/drivers/staging/android/binder.c linux-2.6.32.41/drivers/staging/android/binder.c
34231 --- linux-2.6.32.41/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34232 +++ linux-2.6.32.41/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34233 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34234 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34235 }
34236
34237 -static struct vm_operations_struct binder_vm_ops = {
34238 +static const struct vm_operations_struct binder_vm_ops = {
34239 .open = binder_vma_open,
34240 .close = binder_vma_close,
34241 };
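From here on, most hunks apply a single transformation: operations tables that contain nothing but function pointers (vm_operations_struct, file_operations, block_device_operations, backlight_ops, sysfs_ops, ata_port_operations, kgdb_io, ...) are declared const, which lets the compiler place them in read-only data so the pointers cannot be overwritten at run time. A minimal sketch of the constified form with hypothetical handlers, assuming the usual <linux/fs.h> / <linux/module.h> context:

    static int demo_open(struct inode *inode, struct file *file)
    {
            return 0;                        /* hypothetical no-op handler */
    }

    static int demo_release(struct inode *inode, struct file *file)
    {
            return 0;
    }

    /* const: the whole table can live in .rodata */
    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .release = demo_release,
    };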
34242 diff -urNp linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c
34243 --- linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34244 +++ linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34245 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34246 return VM_FAULT_NOPAGE;
34247 }
34248
34249 -static struct vm_operations_struct b3dfg_vm_ops = {
34250 +static const struct vm_operations_struct b3dfg_vm_ops = {
34251 .fault = b3dfg_vma_fault,
34252 };
34253
34254 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34255 return r;
34256 }
34257
34258 -static struct file_operations b3dfg_fops = {
34259 +static const struct file_operations b3dfg_fops = {
34260 .owner = THIS_MODULE,
34261 .open = b3dfg_open,
34262 .release = b3dfg_release,
34263 diff -urNp linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c
34264 --- linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34265 +++ linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34266 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34267 mutex_unlock(&dev->mutex);
34268 }
34269
34270 -static struct vm_operations_struct comedi_vm_ops = {
34271 +static const struct vm_operations_struct comedi_vm_ops = {
34272 .close = comedi_unmap,
34273 };
34274
34275 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c
34276 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34277 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34278 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34279 static dev_t adsp_devno;
34280 static struct class *adsp_class;
34281
34282 -static struct file_operations adsp_fops = {
34283 +static const struct file_operations adsp_fops = {
34284 .owner = THIS_MODULE,
34285 .open = adsp_open,
34286 .unlocked_ioctl = adsp_ioctl,
34287 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c
34288 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34289 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34290 @@ -1022,7 +1022,7 @@ done:
34291 return rc;
34292 }
34293
34294 -static struct file_operations audio_aac_fops = {
34295 +static const struct file_operations audio_aac_fops = {
34296 .owner = THIS_MODULE,
34297 .open = audio_open,
34298 .release = audio_release,
34299 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c
34300 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34301 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34302 @@ -833,7 +833,7 @@ done:
34303 return rc;
34304 }
34305
34306 -static struct file_operations audio_amrnb_fops = {
34307 +static const struct file_operations audio_amrnb_fops = {
34308 .owner = THIS_MODULE,
34309 .open = audamrnb_open,
34310 .release = audamrnb_release,
34311 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c
34312 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34313 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34314 @@ -805,7 +805,7 @@ dma_fail:
34315 return rc;
34316 }
34317
34318 -static struct file_operations audio_evrc_fops = {
34319 +static const struct file_operations audio_evrc_fops = {
34320 .owner = THIS_MODULE,
34321 .open = audevrc_open,
34322 .release = audevrc_release,
34323 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c
34324 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34325 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34326 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34327 return 0;
34328 }
34329
34330 -static struct file_operations audio_fops = {
34331 +static const struct file_operations audio_fops = {
34332 .owner = THIS_MODULE,
34333 .open = audio_in_open,
34334 .release = audio_in_release,
34335 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34336 .unlocked_ioctl = audio_in_ioctl,
34337 };
34338
34339 -static struct file_operations audpre_fops = {
34340 +static const struct file_operations audpre_fops = {
34341 .owner = THIS_MODULE,
34342 .open = audpre_open,
34343 .unlocked_ioctl = audpre_ioctl,
34344 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c
34345 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34346 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34347 @@ -941,7 +941,7 @@ done:
34348 return rc;
34349 }
34350
34351 -static struct file_operations audio_mp3_fops = {
34352 +static const struct file_operations audio_mp3_fops = {
34353 .owner = THIS_MODULE,
34354 .open = audio_open,
34355 .release = audio_release,
34356 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c
34357 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34358 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34359 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34360 return 0;
34361 }
34362
34363 -static struct file_operations audio_fops = {
34364 +static const struct file_operations audio_fops = {
34365 .owner = THIS_MODULE,
34366 .open = audio_open,
34367 .release = audio_release,
34368 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34369 .unlocked_ioctl = audio_ioctl,
34370 };
34371
34372 -static struct file_operations audpp_fops = {
34373 +static const struct file_operations audpp_fops = {
34374 .owner = THIS_MODULE,
34375 .open = audpp_open,
34376 .unlocked_ioctl = audpp_ioctl,
34377 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c
34378 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34379 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34380 @@ -816,7 +816,7 @@ err:
34381 return rc;
34382 }
34383
34384 -static struct file_operations audio_qcelp_fops = {
34385 +static const struct file_operations audio_qcelp_fops = {
34386 .owner = THIS_MODULE,
34387 .open = audqcelp_open,
34388 .release = audqcelp_release,
34389 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c
34390 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34391 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34392 @@ -242,7 +242,7 @@ err:
34393 return rc;
34394 }
34395
34396 -static struct file_operations snd_fops = {
34397 +static const struct file_operations snd_fops = {
34398 .owner = THIS_MODULE,
34399 .open = snd_open,
34400 .release = snd_release,
34401 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c
34402 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34403 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34404 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34405 return 0;
34406 }
34407
34408 -static struct file_operations qmi_fops = {
34409 +static const struct file_operations qmi_fops = {
34410 .owner = THIS_MODULE,
34411 .read = qmi_read,
34412 .write = qmi_write,
34413 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c
34414 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34415 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34416 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34417 return rc;
34418 }
34419
34420 -static struct file_operations rpcrouter_server_fops = {
34421 +static const struct file_operations rpcrouter_server_fops = {
34422 .owner = THIS_MODULE,
34423 .open = rpcrouter_open,
34424 .release = rpcrouter_release,
34425 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34426 .unlocked_ioctl = rpcrouter_ioctl,
34427 };
34428
34429 -static struct file_operations rpcrouter_router_fops = {
34430 +static const struct file_operations rpcrouter_router_fops = {
34431 .owner = THIS_MODULE,
34432 .open = rpcrouter_open,
34433 .release = rpcrouter_release,
34434 diff -urNp linux-2.6.32.41/drivers/staging/dst/dcore.c linux-2.6.32.41/drivers/staging/dst/dcore.c
34435 --- linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34436 +++ linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34437 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34438 return 0;
34439 }
34440
34441 -static struct block_device_operations dst_blk_ops = {
34442 +static const struct block_device_operations dst_blk_ops = {
34443 .open = dst_bdev_open,
34444 .release = dst_bdev_release,
34445 .owner = THIS_MODULE,
34446 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34447 n->size = ctl->size;
34448
34449 atomic_set(&n->refcnt, 1);
34450 - atomic_long_set(&n->gen, 0);
34451 + atomic_long_set_unchecked(&n->gen, 0);
34452 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34453
34454 err = dst_node_sysfs_init(n);
34455 diff -urNp linux-2.6.32.41/drivers/staging/dst/trans.c linux-2.6.32.41/drivers/staging/dst/trans.c
34456 --- linux-2.6.32.41/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34457 +++ linux-2.6.32.41/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34458 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34459 t->error = 0;
34460 t->retries = 0;
34461 atomic_set(&t->refcnt, 1);
34462 - t->gen = atomic_long_inc_return(&n->gen);
34463 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34464
34465 t->enc = bio_data_dir(bio);
34466 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34467 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c
34468 --- linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34469 +++ linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34470 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34471 struct net_device_stats *stats = &etdev->net_stats;
34472
34473 if (pMpTcb->Flags & fMP_DEST_BROAD)
34474 - atomic_inc(&etdev->Stats.brdcstxmt);
34475 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34476 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34477 - atomic_inc(&etdev->Stats.multixmt);
34478 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34479 else
34480 - atomic_inc(&etdev->Stats.unixmt);
34481 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34482
34483 if (pMpTcb->Packet) {
34484 stats->tx_bytes += pMpTcb->Packet->len;
34485 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h
34486 --- linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34487 +++ linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34488 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34489 * operations
34490 */
34491 u32 unircv; /* # multicast packets received */
34492 - atomic_t unixmt; /* # multicast packets for Tx */
34493 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34494 u32 multircv; /* # multicast packets received */
34495 - atomic_t multixmt; /* # multicast packets for Tx */
34496 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34497 u32 brdcstrcv; /* # broadcast packets received */
34498 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34499 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34500 u32 norcvbuf; /* # Rx packets discarded */
34501 u32 noxmtbuf; /* # Tx packets discarded */
34502
34503 diff -urNp linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c
34504 --- linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34505 +++ linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34506 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34507 return 0;
34508 }
34509
34510 -static struct vm_operations_struct go7007_vm_ops = {
34511 +static const struct vm_operations_struct go7007_vm_ops = {
34512 .open = go7007_vm_open,
34513 .close = go7007_vm_close,
34514 .fault = go7007_vm_fault,
34515 diff -urNp linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c
34516 --- linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34517 +++ linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34518 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34519 /* The one and only one */
34520 static struct blkvsc_driver_context g_blkvsc_drv;
34521
34522 -static struct block_device_operations block_ops = {
34523 +static const struct block_device_operations block_ops = {
34524 .owner = THIS_MODULE,
34525 .open = blkvsc_open,
34526 .release = blkvsc_release,
34527 diff -urNp linux-2.6.32.41/drivers/staging/hv/Channel.c linux-2.6.32.41/drivers/staging/hv/Channel.c
34528 --- linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34529 +++ linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34530 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34531
34532 DPRINT_ENTER(VMBUS);
34533
34534 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34535 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34536 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34537 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34538
34539 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34540 ASSERT(msgInfo != NULL);
34541 diff -urNp linux-2.6.32.41/drivers/staging/hv/Hv.c linux-2.6.32.41/drivers/staging/hv/Hv.c
34542 --- linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34543 +++ linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34544 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34545 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34546 u32 outputAddressHi = outputAddress >> 32;
34547 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34548 - volatile void *hypercallPage = gHvContext.HypercallPage;
34549 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34550
34551 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34552 Control, Input, Output);
34553 diff -urNp linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c
34554 --- linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34555 +++ linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34556 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34557 to_device_context(root_device_obj);
34558 struct device_context *child_device_ctx =
34559 to_device_context(child_device_obj);
34560 - static atomic_t device_num = ATOMIC_INIT(0);
34561 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34562
34563 DPRINT_ENTER(VMBUS_DRV);
34564
34565 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34566
34567 /* Set the device name. Otherwise, device_register() will fail. */
34568 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34569 - atomic_inc_return(&device_num));
34570 + atomic_inc_return_unchecked(&device_num));
34571
34572 /* The new device belongs to this bus */
34573 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34574 diff -urNp linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h
34575 --- linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34576 +++ linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34577 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34578 struct VMBUS_CONNECTION {
34579 enum VMBUS_CONNECT_STATE ConnectState;
34580
34581 - atomic_t NextGpadlHandle;
34582 + atomic_unchecked_t NextGpadlHandle;
34583
34584 /*
34585 * Represents channel interrupts. Each bit position represents a
34586 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet.c linux-2.6.32.41/drivers/staging/octeon/ethernet.c
34587 --- linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34588 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34589 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34590 * since the RX tasklet also increments it.
34591 */
34592 #ifdef CONFIG_64BIT
34593 - atomic64_add(rx_status.dropped_packets,
34594 - (atomic64_t *)&priv->stats.rx_dropped);
34595 + atomic64_add_unchecked(rx_status.dropped_packets,
34596 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34597 #else
34598 - atomic_add(rx_status.dropped_packets,
34599 - (atomic_t *)&priv->stats.rx_dropped);
34600 + atomic_add_unchecked(rx_status.dropped_packets,
34601 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34602 #endif
34603 }
34604
34605 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c
34606 --- linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34607 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34608 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34609 /* Increment RX stats for virtual ports */
34610 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34611 #ifdef CONFIG_64BIT
34612 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34613 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34614 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34615 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34616 #else
34617 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34618 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34619 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34620 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34621 #endif
34622 }
34623 netif_receive_skb(skb);
34624 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34625 dev->name);
34626 */
34627 #ifdef CONFIG_64BIT
34628 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34629 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34630 #else
34631 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34632 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34633 #endif
34634 dev_kfree_skb_irq(skb);
34635 }
34636 diff -urNp linux-2.6.32.41/drivers/staging/panel/panel.c linux-2.6.32.41/drivers/staging/panel/panel.c
34637 --- linux-2.6.32.41/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34638 +++ linux-2.6.32.41/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34639 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34640 return 0;
34641 }
34642
34643 -static struct file_operations lcd_fops = {
34644 +static const struct file_operations lcd_fops = {
34645 .write = lcd_write,
34646 .open = lcd_open,
34647 .release = lcd_release,
34648 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34649 return 0;
34650 }
34651
34652 -static struct file_operations keypad_fops = {
34653 +static const struct file_operations keypad_fops = {
34654 .read = keypad_read, /* read */
34655 .open = keypad_open, /* open */
34656 .release = keypad_release, /* close */
34657 diff -urNp linux-2.6.32.41/drivers/staging/phison/phison.c linux-2.6.32.41/drivers/staging/phison/phison.c
34658 --- linux-2.6.32.41/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34659 +++ linux-2.6.32.41/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34660 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34661 ATA_BMDMA_SHT(DRV_NAME),
34662 };
34663
34664 -static struct ata_port_operations phison_ops = {
34665 +static const struct ata_port_operations phison_ops = {
34666 .inherits = &ata_bmdma_port_ops,
34667 .prereset = phison_pre_reset,
34668 };
34669 diff -urNp linux-2.6.32.41/drivers/staging/poch/poch.c linux-2.6.32.41/drivers/staging/poch/poch.c
34670 --- linux-2.6.32.41/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34671 +++ linux-2.6.32.41/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34672 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34673 return 0;
34674 }
34675
34676 -static struct file_operations poch_fops = {
34677 +static const struct file_operations poch_fops = {
34678 .owner = THIS_MODULE,
34679 .open = poch_open,
34680 .release = poch_release,
34681 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/inode.c linux-2.6.32.41/drivers/staging/pohmelfs/inode.c
34682 --- linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34683 +++ linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34684 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34685 mutex_init(&psb->mcache_lock);
34686 psb->mcache_root = RB_ROOT;
34687 psb->mcache_timeout = msecs_to_jiffies(5000);
34688 - atomic_long_set(&psb->mcache_gen, 0);
34689 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34690
34691 psb->trans_max_pages = 100;
34692
34693 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34694 INIT_LIST_HEAD(&psb->crypto_ready_list);
34695 INIT_LIST_HEAD(&psb->crypto_active_list);
34696
34697 - atomic_set(&psb->trans_gen, 1);
34698 + atomic_set_unchecked(&psb->trans_gen, 1);
34699 atomic_long_set(&psb->total_inodes, 0);
34700
34701 mutex_init(&psb->state_lock);
34702 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c
34703 --- linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34704 +++ linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34705 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34706 m->data = data;
34707 m->start = start;
34708 m->size = size;
34709 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34710 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34711
34712 mutex_lock(&psb->mcache_lock);
34713 err = pohmelfs_mcache_insert(psb, m);
34714 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h
34715 --- linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34716 +++ linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34717 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34718 struct pohmelfs_sb {
34719 struct rb_root mcache_root;
34720 struct mutex mcache_lock;
34721 - atomic_long_t mcache_gen;
34722 + atomic_long_unchecked_t mcache_gen;
34723 unsigned long mcache_timeout;
34724
34725 unsigned int idx;
34726
34727 unsigned int trans_retries;
34728
34729 - atomic_t trans_gen;
34730 + atomic_unchecked_t trans_gen;
34731
34732 unsigned int crypto_attached_size;
34733 unsigned int crypto_align_size;
34734 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/trans.c linux-2.6.32.41/drivers/staging/pohmelfs/trans.c
34735 --- linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34736 +++ linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34737 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34738 int err;
34739 struct netfs_cmd *cmd = t->iovec.iov_base;
34740
34741 - t->gen = atomic_inc_return(&psb->trans_gen);
34742 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34743
34744 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34745 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34746 diff -urNp linux-2.6.32.41/drivers/staging/sep/sep_driver.c linux-2.6.32.41/drivers/staging/sep/sep_driver.c
34747 --- linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34748 +++ linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34749 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34750 static dev_t sep_devno;
34751
34752 /* the files operations structure of the driver */
34753 -static struct file_operations sep_file_operations = {
34754 +static const struct file_operations sep_file_operations = {
34755 .owner = THIS_MODULE,
34756 .ioctl = sep_ioctl,
34757 .poll = sep_poll,
34758 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci.h linux-2.6.32.41/drivers/staging/usbip/vhci.h
34759 --- linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34760 +++ linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34761 @@ -92,7 +92,7 @@ struct vhci_hcd {
34762 unsigned resuming:1;
34763 unsigned long re_timeout;
34764
34765 - atomic_t seqnum;
34766 + atomic_unchecked_t seqnum;
34767
34768 /*
34769 * NOTE:
34770 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c
34771 --- linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34772 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34773 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34774 return;
34775 }
34776
34777 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34778 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34779 if (priv->seqnum == 0xffff)
34780 usbip_uinfo("seqnum max\n");
34781
34782 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34783 return -ENOMEM;
34784 }
34785
34786 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34787 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34788 if (unlink->seqnum == 0xffff)
34789 usbip_uinfo("seqnum max\n");
34790
34791 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34792 vdev->rhport = rhport;
34793 }
34794
34795 - atomic_set(&vhci->seqnum, 0);
34796 + atomic_set_unchecked(&vhci->seqnum, 0);
34797 spin_lock_init(&vhci->lock);
34798
34799
34800 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c
34801 --- linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34802 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34803 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34804 usbip_uerr("cannot find a urb of seqnum %u\n",
34805 pdu->base.seqnum);
34806 usbip_uinfo("max seqnum %d\n",
34807 - atomic_read(&the_controller->seqnum));
34808 + atomic_read_unchecked(&the_controller->seqnum));
34809 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34810 return;
34811 }
34812 diff -urNp linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c
34813 --- linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34814 +++ linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34815 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34816 static int __init vme_user_probe(struct device *, int, int);
34817 static int __exit vme_user_remove(struct device *, int, int);
34818
34819 -static struct file_operations vme_user_fops = {
34820 +static const struct file_operations vme_user_fops = {
34821 .open = vme_user_open,
34822 .release = vme_user_release,
34823 .read = vme_user_read,
34824 diff -urNp linux-2.6.32.41/drivers/telephony/ixj.c linux-2.6.32.41/drivers/telephony/ixj.c
34825 --- linux-2.6.32.41/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34826 +++ linux-2.6.32.41/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34827 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34828 bool mContinue;
34829 char *pIn, *pOut;
34830
34831 + pax_track_stack();
34832 +
34833 if (!SCI_Prepare(j))
34834 return 0;
34835
34836 diff -urNp linux-2.6.32.41/drivers/uio/uio.c linux-2.6.32.41/drivers/uio/uio.c
34837 --- linux-2.6.32.41/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34838 +++ linux-2.6.32.41/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34839 @@ -23,6 +23,7 @@
34840 #include <linux/string.h>
34841 #include <linux/kobject.h>
34842 #include <linux/uio_driver.h>
34843 +#include <asm/local.h>
34844
34845 #define UIO_MAX_DEVICES 255
34846
34847 @@ -30,10 +31,10 @@ struct uio_device {
34848 struct module *owner;
34849 struct device *dev;
34850 int minor;
34851 - atomic_t event;
34852 + atomic_unchecked_t event;
34853 struct fasync_struct *async_queue;
34854 wait_queue_head_t wait;
34855 - int vma_count;
34856 + local_t vma_count;
34857 struct uio_info *info;
34858 struct kobject *map_dir;
34859 struct kobject *portio_dir;
34860 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34861 return entry->show(mem, buf);
34862 }
34863
34864 -static struct sysfs_ops map_sysfs_ops = {
34865 +static const struct sysfs_ops map_sysfs_ops = {
34866 .show = map_type_show,
34867 };
34868
34869 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34870 return entry->show(port, buf);
34871 }
34872
34873 -static struct sysfs_ops portio_sysfs_ops = {
34874 +static const struct sysfs_ops portio_sysfs_ops = {
34875 .show = portio_type_show,
34876 };
34877
34878 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34879 struct uio_device *idev = dev_get_drvdata(dev);
34880 if (idev)
34881 return sprintf(buf, "%u\n",
34882 - (unsigned int)atomic_read(&idev->event));
34883 + (unsigned int)atomic_read_unchecked(&idev->event));
34884 else
34885 return -ENODEV;
34886 }
34887 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34888 {
34889 struct uio_device *idev = info->uio_dev;
34890
34891 - atomic_inc(&idev->event);
34892 + atomic_inc_unchecked(&idev->event);
34893 wake_up_interruptible(&idev->wait);
34894 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34895 }
34896 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
34897 }
34898
34899 listener->dev = idev;
34900 - listener->event_count = atomic_read(&idev->event);
34901 + listener->event_count = atomic_read_unchecked(&idev->event);
34902 filep->private_data = listener;
34903
34904 if (idev->info->open) {
34905 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
34906 return -EIO;
34907
34908 poll_wait(filep, &idev->wait, wait);
34909 - if (listener->event_count != atomic_read(&idev->event))
34910 + if (listener->event_count != atomic_read_unchecked(&idev->event))
34911 return POLLIN | POLLRDNORM;
34912 return 0;
34913 }
34914 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
34915 do {
34916 set_current_state(TASK_INTERRUPTIBLE);
34917
34918 - event_count = atomic_read(&idev->event);
34919 + event_count = atomic_read_unchecked(&idev->event);
34920 if (event_count != listener->event_count) {
34921 if (copy_to_user(buf, &event_count, count))
34922 retval = -EFAULT;
34923 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
34924 static void uio_vma_open(struct vm_area_struct *vma)
34925 {
34926 struct uio_device *idev = vma->vm_private_data;
34927 - idev->vma_count++;
34928 + local_inc(&idev->vma_count);
34929 }
34930
34931 static void uio_vma_close(struct vm_area_struct *vma)
34932 {
34933 struct uio_device *idev = vma->vm_private_data;
34934 - idev->vma_count--;
34935 + local_dec(&idev->vma_count);
34936 }
34937
34938 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34939 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
34940 idev->owner = owner;
34941 idev->info = info;
34942 init_waitqueue_head(&idev->wait);
34943 - atomic_set(&idev->event, 0);
34944 + atomic_set_unchecked(&idev->event, 0);
34945
34946 ret = uio_get_minor(idev);
34947 if (ret)
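Besides converting the event counter, the uio hunks above change vma_count from a plain int to a local_t and replace the bare ++/-- with local_inc()/local_dec(), which is why <asm/local.h> is added to the includes. A minimal sketch of that API with hypothetical names, assuming <linux/mm.h> for struct vm_area_struct:

    #include <asm/local.h>

    static local_t demo_vma_count;            /* local_t: cheap, interrupt-safe on the local CPU */

    static void demo_vma_open(struct vm_area_struct *vma)
    {
            local_inc(&demo_vma_count);       /* was: demo_vma_count++ */
    }

    static void demo_vma_close(struct vm_area_struct *vma)
    {
            local_dec(&demo_vma_count);       /* was: demo_vma_count-- */
    }

    static long demo_vma_count_read(void)
    {
            return local_read(&demo_vma_count);
    }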
34948 diff -urNp linux-2.6.32.41/drivers/usb/atm/usbatm.c linux-2.6.32.41/drivers/usb/atm/usbatm.c
34949 --- linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
34950 +++ linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
34951 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
34952 if (printk_ratelimit())
34953 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34954 __func__, vpi, vci);
34955 - atomic_inc(&vcc->stats->rx_err);
34956 + atomic_inc_unchecked(&vcc->stats->rx_err);
34957 return;
34958 }
34959
34960 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
34961 if (length > ATM_MAX_AAL5_PDU) {
34962 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34963 __func__, length, vcc);
34964 - atomic_inc(&vcc->stats->rx_err);
34965 + atomic_inc_unchecked(&vcc->stats->rx_err);
34966 goto out;
34967 }
34968
34969 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
34970 if (sarb->len < pdu_length) {
34971 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34972 __func__, pdu_length, sarb->len, vcc);
34973 - atomic_inc(&vcc->stats->rx_err);
34974 + atomic_inc_unchecked(&vcc->stats->rx_err);
34975 goto out;
34976 }
34977
34978 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34979 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34980 __func__, vcc);
34981 - atomic_inc(&vcc->stats->rx_err);
34982 + atomic_inc_unchecked(&vcc->stats->rx_err);
34983 goto out;
34984 }
34985
34986 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
34987 if (printk_ratelimit())
34988 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34989 __func__, length);
34990 - atomic_inc(&vcc->stats->rx_drop);
34991 + atomic_inc_unchecked(&vcc->stats->rx_drop);
34992 goto out;
34993 }
34994
34995 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
34996
34997 vcc->push(vcc, skb);
34998
34999 - atomic_inc(&vcc->stats->rx);
35000 + atomic_inc_unchecked(&vcc->stats->rx);
35001 out:
35002 skb_trim(sarb, 0);
35003 }
35004 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35005 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35006
35007 usbatm_pop(vcc, skb);
35008 - atomic_inc(&vcc->stats->tx);
35009 + atomic_inc_unchecked(&vcc->stats->tx);
35010
35011 skb = skb_dequeue(&instance->sndqueue);
35012 }
35013 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35014 if (!left--)
35015 return sprintf(page,
35016 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35017 - atomic_read(&atm_dev->stats.aal5.tx),
35018 - atomic_read(&atm_dev->stats.aal5.tx_err),
35019 - atomic_read(&atm_dev->stats.aal5.rx),
35020 - atomic_read(&atm_dev->stats.aal5.rx_err),
35021 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35022 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35023 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35024 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35025 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35026 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35027
35028 if (!left--) {
35029 if (instance->disconnected)
35030 diff -urNp linux-2.6.32.41/drivers/usb/class/cdc-wdm.c linux-2.6.32.41/drivers/usb/class/cdc-wdm.c
35031 --- linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35032 +++ linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35033 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35034 if (r < 0)
35035 goto outnp;
35036
35037 - if (!file->f_flags && O_NONBLOCK)
35038 + if (!(file->f_flags & O_NONBLOCK))
35039 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35040 &desc->flags));
35041 else
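The single-line change in cdc-wdm.c above fixes a logic error rather than adding hardening: !file->f_flags && O_NONBLOCK logically negates the whole flags word and then ANDs the result with the always-nonzero constant O_NONBLOCK, so the blocking wait was taken only when f_flags happened to be zero; the replacement masks out the flag first. A standalone sketch of the difference, with a stand-in flag value:

    #include <stdio.h>

    #define DEMO_NONBLOCK 04000               /* stand-in for O_NONBLOCK */

    int main(void)
    {
            unsigned int f_flags = 02;        /* e.g. opened O_RDWR, blocking */

            printf("%d\n", !f_flags && DEMO_NONBLOCK);    /* 0: wrongly treated as non-blocking */
            printf("%d\n", !(f_flags & DEMO_NONBLOCK));   /* 1: correctly treated as blocking   */
            return 0;
    }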
35042 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.c linux-2.6.32.41/drivers/usb/core/hcd.c
35043 --- linux-2.6.32.41/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35044 +++ linux-2.6.32.41/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35045 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35046
35047 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35048
35049 -struct usb_mon_operations *mon_ops;
35050 +const struct usb_mon_operations *mon_ops;
35051
35052 /*
35053 * The registration is unlocked.
35054 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35055 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35056 */
35057
35058 -int usb_mon_register (struct usb_mon_operations *ops)
35059 +int usb_mon_register (const struct usb_mon_operations *ops)
35060 {
35061
35062 if (mon_ops)
35063 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.h linux-2.6.32.41/drivers/usb/core/hcd.h
35064 --- linux-2.6.32.41/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35065 +++ linux-2.6.32.41/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35066 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35067 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35068
35069 struct usb_mon_operations {
35070 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35071 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35072 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35073 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35074 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35075 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35076 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35077 };
35078
35079 -extern struct usb_mon_operations *mon_ops;
35080 +extern const struct usb_mon_operations *mon_ops;
35081
35082 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35083 {
35084 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35085 (*mon_ops->urb_complete)(bus, urb, status);
35086 }
35087
35088 -int usb_mon_register(struct usb_mon_operations *ops);
35089 +int usb_mon_register(const struct usb_mon_operations *ops);
35090 void usb_mon_deregister(void);
35091
35092 #else
35093 diff -urNp linux-2.6.32.41/drivers/usb/core/message.c linux-2.6.32.41/drivers/usb/core/message.c
35094 --- linux-2.6.32.41/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35095 +++ linux-2.6.32.41/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35096 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35097 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35098 if (buf) {
35099 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35100 - if (len > 0) {
35101 - smallbuf = kmalloc(++len, GFP_NOIO);
35102 + if (len++ > 0) {
35103 + smallbuf = kmalloc(len, GFP_NOIO);
35104 if (!smallbuf)
35105 return buf;
35106 memcpy(smallbuf, buf, len);
35107 diff -urNp linux-2.6.32.41/drivers/usb/misc/appledisplay.c linux-2.6.32.41/drivers/usb/misc/appledisplay.c
35108 --- linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35109 +++ linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35110 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35111 return pdata->msgdata[1];
35112 }
35113
35114 -static struct backlight_ops appledisplay_bl_data = {
35115 +static const struct backlight_ops appledisplay_bl_data = {
35116 .get_brightness = appledisplay_bl_get_brightness,
35117 .update_status = appledisplay_bl_update_status,
35118 };
35119 diff -urNp linux-2.6.32.41/drivers/usb/mon/mon_main.c linux-2.6.32.41/drivers/usb/mon/mon_main.c
35120 --- linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35121 +++ linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35122 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35123 /*
35124 * Ops
35125 */
35126 -static struct usb_mon_operations mon_ops_0 = {
35127 +static const struct usb_mon_operations mon_ops_0 = {
35128 .urb_submit = mon_submit,
35129 .urb_submit_error = mon_submit_error,
35130 .urb_complete = mon_complete,
35131 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h
35132 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35133 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35134 @@ -192,7 +192,7 @@ struct wahc {
35135 struct list_head xfer_delayed_list;
35136 spinlock_t xfer_list_lock;
35137 struct work_struct xfer_work;
35138 - atomic_t xfer_id_count;
35139 + atomic_unchecked_t xfer_id_count;
35140 };
35141
35142
35143 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35144 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35145 spin_lock_init(&wa->xfer_list_lock);
35146 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35147 - atomic_set(&wa->xfer_id_count, 1);
35148 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35149 }
35150
35151 /**
35152 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c
35153 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35154 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35155 @@ -293,7 +293,7 @@ out:
35156 */
35157 static void wa_xfer_id_init(struct wa_xfer *xfer)
35158 {
35159 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35160 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35161 }
35162
35163 /*
35164 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/messages.c linux-2.6.32.41/drivers/uwb/wlp/messages.c
35165 --- linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35166 +++ linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35167 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35168 size_t len = skb->len;
35169 size_t used;
35170 ssize_t result;
35171 - struct wlp_nonce enonce, rnonce;
35172 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35173 enum wlp_assc_error assc_err;
35174 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35175 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
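The change above zero-initializes the two on-stack wlp_nonce structures before the frame is parsed; the = {{0}} form (double braces because the struct's first member appears to be an array) initializes every member to zero, presumably so that no uninitialized stack contents are used or copied out if parsing bails out early. A standalone sketch of the initializer with a hypothetical struct:

    #include <stdio.h>

    struct demo_nonce {
            unsigned char data[16];           /* hypothetical layout */
    };

    int main(void)
    {
            struct demo_nonce enonce = {{0}}, rnonce = {{0}};      /* all members zeroed */

            printf("%d %d\n", enonce.data[5], rnonce.data[15]);    /* prints: 0 0 */
            return 0;
    }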
35176 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/sysfs.c linux-2.6.32.41/drivers/uwb/wlp/sysfs.c
35177 --- linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35178 +++ linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35179 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35180 return ret;
35181 }
35182
35183 -static
35184 -struct sysfs_ops wss_sysfs_ops = {
35185 +static const struct sysfs_ops wss_sysfs_ops = {
35186 .show = wlp_wss_attr_show,
35187 .store = wlp_wss_attr_store,
35188 };
35189 diff -urNp linux-2.6.32.41/drivers/video/atmel_lcdfb.c linux-2.6.32.41/drivers/video/atmel_lcdfb.c
35190 --- linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35191 +++ linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35192 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35193 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35194 }
35195
35196 -static struct backlight_ops atmel_lcdc_bl_ops = {
35197 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35198 .update_status = atmel_bl_update_status,
35199 .get_brightness = atmel_bl_get_brightness,
35200 };
35201 diff -urNp linux-2.6.32.41/drivers/video/aty/aty128fb.c linux-2.6.32.41/drivers/video/aty/aty128fb.c
35202 --- linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35203 +++ linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35204 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35205 return bd->props.brightness;
35206 }
35207
35208 -static struct backlight_ops aty128_bl_data = {
35209 +static const struct backlight_ops aty128_bl_data = {
35210 .get_brightness = aty128_bl_get_brightness,
35211 .update_status = aty128_bl_update_status,
35212 };
35213 diff -urNp linux-2.6.32.41/drivers/video/aty/atyfb_base.c linux-2.6.32.41/drivers/video/aty/atyfb_base.c
35214 --- linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35215 +++ linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35216 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35217 return bd->props.brightness;
35218 }
35219
35220 -static struct backlight_ops aty_bl_data = {
35221 +static const struct backlight_ops aty_bl_data = {
35222 .get_brightness = aty_bl_get_brightness,
35223 .update_status = aty_bl_update_status,
35224 };
35225 diff -urNp linux-2.6.32.41/drivers/video/aty/radeon_backlight.c linux-2.6.32.41/drivers/video/aty/radeon_backlight.c
35226 --- linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35227 +++ linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35228 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35229 return bd->props.brightness;
35230 }
35231
35232 -static struct backlight_ops radeon_bl_data = {
35233 +static const struct backlight_ops radeon_bl_data = {
35234 .get_brightness = radeon_bl_get_brightness,
35235 .update_status = radeon_bl_update_status,
35236 };
35237 diff -urNp linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c
35238 --- linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35239 +++ linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35240 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35241 return error ? data->current_brightness : reg_val;
35242 }
35243
35244 -static struct backlight_ops adp5520_bl_ops = {
35245 +static const struct backlight_ops adp5520_bl_ops = {
35246 .update_status = adp5520_bl_update_status,
35247 .get_brightness = adp5520_bl_get_brightness,
35248 };
35249 diff -urNp linux-2.6.32.41/drivers/video/backlight/adx_bl.c linux-2.6.32.41/drivers/video/backlight/adx_bl.c
35250 --- linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35251 +++ linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35252 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35253 return 1;
35254 }
35255
35256 -static struct backlight_ops adx_backlight_ops = {
35257 +static const struct backlight_ops adx_backlight_ops = {
35258 .options = 0,
35259 .update_status = adx_backlight_update_status,
35260 .get_brightness = adx_backlight_get_brightness,
35261 diff -urNp linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c
35262 --- linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35263 +++ linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35264 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35265 return pwm_channel_enable(&pwmbl->pwmc);
35266 }
35267
35268 -static struct backlight_ops atmel_pwm_bl_ops = {
35269 +static const struct backlight_ops atmel_pwm_bl_ops = {
35270 .get_brightness = atmel_pwm_bl_get_intensity,
35271 .update_status = atmel_pwm_bl_set_intensity,
35272 };
35273 diff -urNp linux-2.6.32.41/drivers/video/backlight/backlight.c linux-2.6.32.41/drivers/video/backlight/backlight.c
35274 --- linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35275 +++ linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35276 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35277 * ERR_PTR() or a pointer to the newly allocated device.
35278 */
35279 struct backlight_device *backlight_device_register(const char *name,
35280 - struct device *parent, void *devdata, struct backlight_ops *ops)
35281 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35282 {
35283 struct backlight_device *new_bd;
35284 int rc;
35285 diff -urNp linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c
35286 --- linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35287 +++ linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35288 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35289 }
35290 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35291
35292 -static struct backlight_ops corgi_bl_ops = {
35293 +static const struct backlight_ops corgi_bl_ops = {
35294 .get_brightness = corgi_bl_get_intensity,
35295 .update_status = corgi_bl_update_status,
35296 };
35297 diff -urNp linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c
35298 --- linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35299 +++ linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35300 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35301 return intensity;
35302 }
35303
35304 -static struct backlight_ops cr_backlight_ops = {
35305 +static const struct backlight_ops cr_backlight_ops = {
35306 .get_brightness = cr_backlight_get_intensity,
35307 .update_status = cr_backlight_set_intensity,
35308 };
35309 diff -urNp linux-2.6.32.41/drivers/video/backlight/da903x_bl.c linux-2.6.32.41/drivers/video/backlight/da903x_bl.c
35310 --- linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35311 +++ linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35312 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35313 return data->current_brightness;
35314 }
35315
35316 -static struct backlight_ops da903x_backlight_ops = {
35317 +static const struct backlight_ops da903x_backlight_ops = {
35318 .update_status = da903x_backlight_update_status,
35319 .get_brightness = da903x_backlight_get_brightness,
35320 };
35321 diff -urNp linux-2.6.32.41/drivers/video/backlight/generic_bl.c linux-2.6.32.41/drivers/video/backlight/generic_bl.c
35322 --- linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35323 +++ linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35324 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35325 }
35326 EXPORT_SYMBOL(corgibl_limit_intensity);
35327
35328 -static struct backlight_ops genericbl_ops = {
35329 +static const struct backlight_ops genericbl_ops = {
35330 .options = BL_CORE_SUSPENDRESUME,
35331 .get_brightness = genericbl_get_intensity,
35332 .update_status = genericbl_send_intensity,
35333 diff -urNp linux-2.6.32.41/drivers/video/backlight/hp680_bl.c linux-2.6.32.41/drivers/video/backlight/hp680_bl.c
35334 --- linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35335 +++ linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35336 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35337 return current_intensity;
35338 }
35339
35340 -static struct backlight_ops hp680bl_ops = {
35341 +static const struct backlight_ops hp680bl_ops = {
35342 .get_brightness = hp680bl_get_intensity,
35343 .update_status = hp680bl_set_intensity,
35344 };
35345 diff -urNp linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c
35346 --- linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35347 +++ linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35348 @@ -93,7 +93,7 @@ out:
35349 return ret;
35350 }
35351
35352 -static struct backlight_ops jornada_bl_ops = {
35353 +static const struct backlight_ops jornada_bl_ops = {
35354 .get_brightness = jornada_bl_get_brightness,
35355 .update_status = jornada_bl_update_status,
35356 .options = BL_CORE_SUSPENDRESUME,
35357 diff -urNp linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c
35358 --- linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35359 +++ linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35360 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35361 return kb3886bl_intensity;
35362 }
35363
35364 -static struct backlight_ops kb3886bl_ops = {
35365 +static const struct backlight_ops kb3886bl_ops = {
35366 .get_brightness = kb3886bl_get_intensity,
35367 .update_status = kb3886bl_send_intensity,
35368 };
35369 diff -urNp linux-2.6.32.41/drivers/video/backlight/locomolcd.c linux-2.6.32.41/drivers/video/backlight/locomolcd.c
35370 --- linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35371 +++ linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35372 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35373 return current_intensity;
35374 }
35375
35376 -static struct backlight_ops locomobl_data = {
35377 +static const struct backlight_ops locomobl_data = {
35378 .get_brightness = locomolcd_get_intensity,
35379 .update_status = locomolcd_set_intensity,
35380 };
35381 diff -urNp linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c
35382 --- linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35383 +++ linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35384 @@ -33,7 +33,7 @@ struct dmi_match_data {
35385 unsigned long iostart;
35386 unsigned long iolen;
35387 /* Backlight operations structure. */
35388 - struct backlight_ops backlight_ops;
35389 + const struct backlight_ops backlight_ops;
35390 };
35391
35392 /* Module parameters. */
35393 diff -urNp linux-2.6.32.41/drivers/video/backlight/omap1_bl.c linux-2.6.32.41/drivers/video/backlight/omap1_bl.c
35394 --- linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35395 +++ linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35396 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35397 return bl->current_intensity;
35398 }
35399
35400 -static struct backlight_ops omapbl_ops = {
35401 +static const struct backlight_ops omapbl_ops = {
35402 .get_brightness = omapbl_get_intensity,
35403 .update_status = omapbl_update_status,
35404 };
35405 diff -urNp linux-2.6.32.41/drivers/video/backlight/progear_bl.c linux-2.6.32.41/drivers/video/backlight/progear_bl.c
35406 --- linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35407 +++ linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35408 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35409 return intensity - HW_LEVEL_MIN;
35410 }
35411
35412 -static struct backlight_ops progearbl_ops = {
35413 +static const struct backlight_ops progearbl_ops = {
35414 .get_brightness = progearbl_get_intensity,
35415 .update_status = progearbl_set_intensity,
35416 };
35417 diff -urNp linux-2.6.32.41/drivers/video/backlight/pwm_bl.c linux-2.6.32.41/drivers/video/backlight/pwm_bl.c
35418 --- linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35419 +++ linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35420 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35421 return bl->props.brightness;
35422 }
35423
35424 -static struct backlight_ops pwm_backlight_ops = {
35425 +static const struct backlight_ops pwm_backlight_ops = {
35426 .update_status = pwm_backlight_update_status,
35427 .get_brightness = pwm_backlight_get_brightness,
35428 };
35429 diff -urNp linux-2.6.32.41/drivers/video/backlight/tosa_bl.c linux-2.6.32.41/drivers/video/backlight/tosa_bl.c
35430 --- linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35431 +++ linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35432 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35433 return props->brightness;
35434 }
35435
35436 -static struct backlight_ops bl_ops = {
35437 +static const struct backlight_ops bl_ops = {
35438 .get_brightness = tosa_bl_get_brightness,
35439 .update_status = tosa_bl_update_status,
35440 };
35441 diff -urNp linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c
35442 --- linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35443 +++ linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35444 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35445 return data->current_brightness;
35446 }
35447
35448 -static struct backlight_ops wm831x_backlight_ops = {
35449 +static const struct backlight_ops wm831x_backlight_ops = {
35450 .options = BL_CORE_SUSPENDRESUME,
35451 .update_status = wm831x_backlight_update_status,
35452 .get_brightness = wm831x_backlight_get_brightness,
35453 diff -urNp linux-2.6.32.41/drivers/video/bf54x-lq043fb.c linux-2.6.32.41/drivers/video/bf54x-lq043fb.c
35454 --- linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35455 +++ linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35456 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35457 return 0;
35458 }
35459
35460 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35461 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35462 .get_brightness = bl_get_brightness,
35463 };
35464
35465 diff -urNp linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c
35466 --- linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35467 +++ linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35468 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35469 return 0;
35470 }
35471
35472 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35473 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35474 .get_brightness = bl_get_brightness,
35475 };
35476
35477 diff -urNp linux-2.6.32.41/drivers/video/fbcmap.c linux-2.6.32.41/drivers/video/fbcmap.c
35478 --- linux-2.6.32.41/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35479 +++ linux-2.6.32.41/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35480 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35481 rc = -ENODEV;
35482 goto out;
35483 }
35484 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35485 - !info->fbops->fb_setcmap)) {
35486 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35487 rc = -EINVAL;
35488 goto out1;
35489 }
35490 diff -urNp linux-2.6.32.41/drivers/video/fbmem.c linux-2.6.32.41/drivers/video/fbmem.c
35491 --- linux-2.6.32.41/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35492 +++ linux-2.6.32.41/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35493 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35494 image->dx += image->width + 8;
35495 }
35496 } else if (rotate == FB_ROTATE_UD) {
35497 - for (x = 0; x < num && image->dx >= 0; x++) {
35498 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35499 info->fbops->fb_imageblit(info, image);
35500 image->dx -= image->width + 8;
35501 }
35502 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35503 image->dy += image->height + 8;
35504 }
35505 } else if (rotate == FB_ROTATE_CCW) {
35506 - for (x = 0; x < num && image->dy >= 0; x++) {
35507 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35508 info->fbops->fb_imageblit(info, image);
35509 image->dy -= image->height + 8;
35510 }
35511 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35512 int flags = info->flags;
35513 int ret = 0;
35514
35515 + pax_track_stack();
35516 +
35517 if (var->activate & FB_ACTIVATE_INV_MODE) {
35518 struct fb_videomode mode1, mode2;
35519
35520 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35521 void __user *argp = (void __user *)arg;
35522 long ret = 0;
35523
35524 + pax_track_stack();
35525 +
35526 switch (cmd) {
35527 case FBIOGET_VSCREENINFO:
35528 if (!lock_fb_info(info))
35529 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35530 return -EFAULT;
35531 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35532 return -EINVAL;
35533 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35534 + if (con2fb.framebuffer >= FB_MAX)
35535 return -EINVAL;
35536 if (!registered_fb[con2fb.framebuffer])
35537 request_module("fb%d", con2fb.framebuffer);
35538 diff -urNp linux-2.6.32.41/drivers/video/i810/i810_accel.c linux-2.6.32.41/drivers/video/i810/i810_accel.c
35539 --- linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35540 +++ linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35541 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35542 }
35543 }
35544 printk("ringbuffer lockup!!!\n");
35545 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35546 i810_report_error(mmio);
35547 par->dev_flags |= LOCKUP;
35548 info->pixmap.scan_align = 1;
35549 diff -urNp linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c
35550 --- linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35551 +++ linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35552 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35553 return bd->props.brightness;
35554 }
35555
35556 -static struct backlight_ops nvidia_bl_ops = {
35557 +static const struct backlight_ops nvidia_bl_ops = {
35558 .get_brightness = nvidia_bl_get_brightness,
35559 .update_status = nvidia_bl_update_status,
35560 };
35561 diff -urNp linux-2.6.32.41/drivers/video/riva/fbdev.c linux-2.6.32.41/drivers/video/riva/fbdev.c
35562 --- linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35563 +++ linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35564 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35565 return bd->props.brightness;
35566 }
35567
35568 -static struct backlight_ops riva_bl_ops = {
35569 +static const struct backlight_ops riva_bl_ops = {
35570 .get_brightness = riva_bl_get_brightness,
35571 .update_status = riva_bl_update_status,
35572 };
35573 diff -urNp linux-2.6.32.41/drivers/video/uvesafb.c linux-2.6.32.41/drivers/video/uvesafb.c
35574 --- linux-2.6.32.41/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35575 +++ linux-2.6.32.41/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35576 @@ -18,6 +18,7 @@
35577 #include <linux/fb.h>
35578 #include <linux/io.h>
35579 #include <linux/mutex.h>
35580 +#include <linux/moduleloader.h>
35581 #include <video/edid.h>
35582 #include <video/uvesafb.h>
35583 #ifdef CONFIG_X86
35584 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35585 NULL,
35586 };
35587
35588 - return call_usermodehelper(v86d_path, argv, envp, 1);
35589 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35590 }
35591
35592 /*
35593 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35594 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35595 par->pmi_setpal = par->ypan = 0;
35596 } else {
35597 +
35598 +#ifdef CONFIG_PAX_KERNEXEC
35599 +#ifdef CONFIG_MODULES
35600 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35601 +#endif
35602 + if (!par->pmi_code) {
35603 + par->pmi_setpal = par->ypan = 0;
35604 + return 0;
35605 + }
35606 +#endif
35607 +
35608 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35609 + task->t.regs.edi);
35610 +
35611 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35612 + pax_open_kernel();
35613 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35614 + pax_close_kernel();
35615 +
35616 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35617 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35618 +#else
35619 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35620 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35621 +#endif
35622 +
35623 printk(KERN_INFO "uvesafb: protected mode interface info at "
35624 "%04x:%04x\n",
35625 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35626 @@ -1799,6 +1822,11 @@ out:
35627 if (par->vbe_modes)
35628 kfree(par->vbe_modes);
35629
35630 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35631 + if (par->pmi_code)
35632 + module_free_exec(NULL, par->pmi_code);
35633 +#endif
35634 +
35635 framebuffer_release(info);
35636 return err;
35637 }
35638 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35639 kfree(par->vbe_state_orig);
35640 if (par->vbe_state_saved)
35641 kfree(par->vbe_state_saved);
35642 +
35643 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35644 + if (par->pmi_code)
35645 + module_free_exec(NULL, par->pmi_code);
35646 +#endif
35647 +
35648 }
35649
35650 framebuffer_release(info);
35651 diff -urNp linux-2.6.32.41/drivers/video/vesafb.c linux-2.6.32.41/drivers/video/vesafb.c
35652 --- linux-2.6.32.41/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35653 +++ linux-2.6.32.41/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35654 @@ -9,6 +9,7 @@
35655 */
35656
35657 #include <linux/module.h>
35658 +#include <linux/moduleloader.h>
35659 #include <linux/kernel.h>
35660 #include <linux/errno.h>
35661 #include <linux/string.h>
35662 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35663 static int vram_total __initdata; /* Set total amount of memory */
35664 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35665 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35666 -static void (*pmi_start)(void) __read_mostly;
35667 -static void (*pmi_pal) (void) __read_mostly;
35668 +static void (*pmi_start)(void) __read_only;
35669 +static void (*pmi_pal) (void) __read_only;
35670 static int depth __read_mostly;
35671 static int vga_compat __read_mostly;
35672 /* --------------------------------------------------------------------- */
35673 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35674 unsigned int size_vmode;
35675 unsigned int size_remap;
35676 unsigned int size_total;
35677 + void *pmi_code = NULL;
35678
35679 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35680 return -ENODEV;
35681 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35682 size_remap = size_total;
35683 vesafb_fix.smem_len = size_remap;
35684
35685 -#ifndef __i386__
35686 - screen_info.vesapm_seg = 0;
35687 -#endif
35688 -
35689 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35690 printk(KERN_WARNING
35691 "vesafb: cannot reserve video memory at 0x%lx\n",
35692 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35693 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35694 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35695
35696 +#ifdef __i386__
35697 +
35698 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35699 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35700 + if (!pmi_code)
35701 +#elif !defined(CONFIG_PAX_KERNEXEC)
35702 + if (0)
35703 +#endif
35704 +
35705 +#endif
35706 + screen_info.vesapm_seg = 0;
35707 +
35708 if (screen_info.vesapm_seg) {
35709 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35710 - screen_info.vesapm_seg,screen_info.vesapm_off);
35711 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35712 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35713 }
35714
35715 if (screen_info.vesapm_seg < 0xc000)
35716 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35717
35718 if (ypan || pmi_setpal) {
35719 unsigned short *pmi_base;
35720 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35721 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35722 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35723 +
35724 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35725 +
35726 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35727 + pax_open_kernel();
35728 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35729 +#else
35730 + pmi_code = pmi_base;
35731 +#endif
35732 +
35733 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35734 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35735 +
35736 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35737 + pmi_start = ktva_ktla(pmi_start);
35738 + pmi_pal = ktva_ktla(pmi_pal);
35739 + pax_close_kernel();
35740 +#endif
35741 +
35742 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35743 if (pmi_base[3]) {
35744 printk(KERN_INFO "vesafb: pmi: ports = ");
35745 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35746 info->node, info->fix.id);
35747 return 0;
35748 err:
35749 +
35750 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35751 + module_free_exec(NULL, pmi_code);
35752 +#endif
35753 +
35754 if (info->screen_base)
35755 iounmap(info->screen_base);
35756 framebuffer_release(info);
35757 diff -urNp linux-2.6.32.41/drivers/xen/sys-hypervisor.c linux-2.6.32.41/drivers/xen/sys-hypervisor.c
35758 --- linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35759 +++ linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35760 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35761 return 0;
35762 }
35763
35764 -static struct sysfs_ops hyp_sysfs_ops = {
35765 +static const struct sysfs_ops hyp_sysfs_ops = {
35766 .show = hyp_sysfs_show,
35767 .store = hyp_sysfs_store,
35768 };
35769 diff -urNp linux-2.6.32.41/fs/9p/vfs_inode.c linux-2.6.32.41/fs/9p/vfs_inode.c
35770 --- linux-2.6.32.41/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35771 +++ linux-2.6.32.41/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35772 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35773 static void
35774 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35775 {
35776 - char *s = nd_get_link(nd);
35777 + const char *s = nd_get_link(nd);
35778
35779 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35780 IS_ERR(s) ? "<error>" : s);
35781 diff -urNp linux-2.6.32.41/fs/aio.c linux-2.6.32.41/fs/aio.c
35782 --- linux-2.6.32.41/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35783 +++ linux-2.6.32.41/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
35784 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35785 size += sizeof(struct io_event) * nr_events;
35786 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35787
35788 - if (nr_pages < 0)
35789 + if (nr_pages <= 0)
35790 return -EINVAL;
35791
35792 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35793 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35794 struct aio_timeout to;
35795 int retry = 0;
35796
35797 + pax_track_stack();
35798 +
35799 /* needed to zero any padding within an entry (there shouldn't be
35800 * any, but C is fun!
35801 */
35802 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
35803 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
35804 {
35805 ssize_t ret;
35806 + struct iovec iovstack;
35807
35808 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
35809 kiocb->ki_nbytes, 1,
35810 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
35811 + &iovstack, &kiocb->ki_iovec);
35812 if (ret < 0)
35813 goto out;
35814
35815 + if (kiocb->ki_iovec == &iovstack) {
35816 + kiocb->ki_inline_vec = iovstack;
35817 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
35818 + }
35819 kiocb->ki_nr_segs = kiocb->ki_nbytes;
35820 kiocb->ki_cur_seg = 0;
35821 /* ki_nbytes/left now reflect bytes instead of segs */
35822 diff -urNp linux-2.6.32.41/fs/attr.c linux-2.6.32.41/fs/attr.c
35823 --- linux-2.6.32.41/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35824 +++ linux-2.6.32.41/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35825 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35826 unsigned long limit;
35827
35828 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35829 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35830 if (limit != RLIM_INFINITY && offset > limit)
35831 goto out_sig;
35832 if (offset > inode->i_sb->s_maxbytes)
35833 diff -urNp linux-2.6.32.41/fs/autofs/root.c linux-2.6.32.41/fs/autofs/root.c
35834 --- linux-2.6.32.41/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35835 +++ linux-2.6.32.41/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35836 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35837 set_bit(n,sbi->symlink_bitmap);
35838 sl = &sbi->symlink[n];
35839 sl->len = strlen(symname);
35840 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35841 + slsize = sl->len+1;
35842 + sl->data = kmalloc(slsize, GFP_KERNEL);
35843 if (!sl->data) {
35844 clear_bit(n,sbi->symlink_bitmap);
35845 unlock_kernel();
35846 diff -urNp linux-2.6.32.41/fs/autofs4/symlink.c linux-2.6.32.41/fs/autofs4/symlink.c
35847 --- linux-2.6.32.41/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35848 +++ linux-2.6.32.41/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35849 @@ -15,7 +15,7 @@
35850 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35851 {
35852 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35853 - nd_set_link(nd, (char *)ino->u.symlink);
35854 + nd_set_link(nd, ino->u.symlink);
35855 return NULL;
35856 }
35857
35858 diff -urNp linux-2.6.32.41/fs/befs/linuxvfs.c linux-2.6.32.41/fs/befs/linuxvfs.c
35859 --- linux-2.6.32.41/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35860 +++ linux-2.6.32.41/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35861 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35862 {
35863 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35864 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35865 - char *link = nd_get_link(nd);
35866 + const char *link = nd_get_link(nd);
35867 if (!IS_ERR(link))
35868 kfree(link);
35869 }
35870 diff -urNp linux-2.6.32.41/fs/binfmt_aout.c linux-2.6.32.41/fs/binfmt_aout.c
35871 --- linux-2.6.32.41/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35872 +++ linux-2.6.32.41/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35873 @@ -16,6 +16,7 @@
35874 #include <linux/string.h>
35875 #include <linux/fs.h>
35876 #include <linux/file.h>
35877 +#include <linux/security.h>
35878 #include <linux/stat.h>
35879 #include <linux/fcntl.h>
35880 #include <linux/ptrace.h>
35881 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35882 #endif
35883 # define START_STACK(u) (u.start_stack)
35884
35885 + memset(&dump, 0, sizeof(dump));
35886 +
35887 fs = get_fs();
35888 set_fs(KERNEL_DS);
35889 has_dumped = 1;
35890 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35891
35892 /* If the size of the dump file exceeds the rlimit, then see what would happen
35893 if we wrote the stack, but not the data area. */
35894 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
35895 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
35896 dump.u_dsize = 0;
35897
35898 /* Make sure we have enough room to write the stack and data areas. */
35899 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
35900 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
35901 dump.u_ssize = 0;
35902
35903 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
35904 dump_size = dump.u_ssize << PAGE_SHIFT;
35905 DUMP_WRITE(dump_start,dump_size);
35906 }
35907 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
35908 - set_fs(KERNEL_DS);
35909 - DUMP_WRITE(current,sizeof(*current));
35910 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
35911 end_coredump:
35912 set_fs(fs);
35913 return has_dumped;
35914 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
35915 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
35916 if (rlim >= RLIM_INFINITY)
35917 rlim = ~0;
35918 +
35919 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
35920 if (ex.a_data + ex.a_bss > rlim)
35921 return -ENOMEM;
35922
35923 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
35924 install_exec_creds(bprm);
35925 current->flags &= ~PF_FORKNOEXEC;
35926
35927 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
35928 + current->mm->pax_flags = 0UL;
35929 +#endif
35930 +
35931 +#ifdef CONFIG_PAX_PAGEEXEC
35932 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
35933 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
35934 +
35935 +#ifdef CONFIG_PAX_EMUTRAMP
35936 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
35937 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
35938 +#endif
35939 +
35940 +#ifdef CONFIG_PAX_MPROTECT
35941 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
35942 + current->mm->pax_flags |= MF_PAX_MPROTECT;
35943 +#endif
35944 +
35945 + }
35946 +#endif
35947 +
35948 if (N_MAGIC(ex) == OMAGIC) {
35949 unsigned long text_addr, map_size;
35950 loff_t pos;
35951 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
35952
35953 down_write(&current->mm->mmap_sem);
35954 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
35955 - PROT_READ | PROT_WRITE | PROT_EXEC,
35956 + PROT_READ | PROT_WRITE,
35957 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
35958 fd_offset + ex.a_text);
35959 up_write(&current->mm->mmap_sem);
35960 diff -urNp linux-2.6.32.41/fs/binfmt_elf.c linux-2.6.32.41/fs/binfmt_elf.c
35961 --- linux-2.6.32.41/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
35962 +++ linux-2.6.32.41/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
35963 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
35964 #define elf_core_dump NULL
35965 #endif
35966
35967 +#ifdef CONFIG_PAX_MPROTECT
35968 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
35969 +#endif
35970 +
35971 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
35972 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
35973 #else
35974 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
35975 .load_binary = load_elf_binary,
35976 .load_shlib = load_elf_library,
35977 .core_dump = elf_core_dump,
35978 +
35979 +#ifdef CONFIG_PAX_MPROTECT
35980 + .handle_mprotect= elf_handle_mprotect,
35981 +#endif
35982 +
35983 .min_coredump = ELF_EXEC_PAGESIZE,
35984 .hasvdso = 1
35985 };
35986 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
35987
35988 static int set_brk(unsigned long start, unsigned long end)
35989 {
35990 + unsigned long e = end;
35991 +
35992 start = ELF_PAGEALIGN(start);
35993 end = ELF_PAGEALIGN(end);
35994 if (end > start) {
35995 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
35996 if (BAD_ADDR(addr))
35997 return addr;
35998 }
35999 - current->mm->start_brk = current->mm->brk = end;
36000 + current->mm->start_brk = current->mm->brk = e;
36001 return 0;
36002 }
36003
36004 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36005 elf_addr_t __user *u_rand_bytes;
36006 const char *k_platform = ELF_PLATFORM;
36007 const char *k_base_platform = ELF_BASE_PLATFORM;
36008 - unsigned char k_rand_bytes[16];
36009 + u32 k_rand_bytes[4];
36010 int items;
36011 elf_addr_t *elf_info;
36012 int ei_index = 0;
36013 const struct cred *cred = current_cred();
36014 struct vm_area_struct *vma;
36015 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36016 +
36017 + pax_track_stack();
36018
36019 /*
36020 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36021 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36022 * Generate 16 random bytes for userspace PRNG seeding.
36023 */
36024 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36025 - u_rand_bytes = (elf_addr_t __user *)
36026 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36027 + srandom32(k_rand_bytes[0] ^ random32());
36028 + srandom32(k_rand_bytes[1] ^ random32());
36029 + srandom32(k_rand_bytes[2] ^ random32());
36030 + srandom32(k_rand_bytes[3] ^ random32());
36031 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36032 + u_rand_bytes = (elf_addr_t __user *) p;
36033 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36034 return -EFAULT;
36035
36036 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36037 return -EFAULT;
36038 current->mm->env_end = p;
36039
36040 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36041 +
36042 /* Put the elf_info on the stack in the right place. */
36043 sp = (elf_addr_t __user *)envp + 1;
36044 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36045 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36046 return -EFAULT;
36047 return 0;
36048 }
36049 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36050 {
36051 struct elf_phdr *elf_phdata;
36052 struct elf_phdr *eppnt;
36053 - unsigned long load_addr = 0;
36054 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36055 int load_addr_set = 0;
36056 unsigned long last_bss = 0, elf_bss = 0;
36057 - unsigned long error = ~0UL;
36058 + unsigned long error = -EINVAL;
36059 unsigned long total_size;
36060 int retval, i, size;
36061
36062 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36063 goto out_close;
36064 }
36065
36066 +#ifdef CONFIG_PAX_SEGMEXEC
36067 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36068 + pax_task_size = SEGMEXEC_TASK_SIZE;
36069 +#endif
36070 +
36071 eppnt = elf_phdata;
36072 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36073 if (eppnt->p_type == PT_LOAD) {
36074 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36075 k = load_addr + eppnt->p_vaddr;
36076 if (BAD_ADDR(k) ||
36077 eppnt->p_filesz > eppnt->p_memsz ||
36078 - eppnt->p_memsz > TASK_SIZE ||
36079 - TASK_SIZE - eppnt->p_memsz < k) {
36080 + eppnt->p_memsz > pax_task_size ||
36081 + pax_task_size - eppnt->p_memsz < k) {
36082 error = -ENOMEM;
36083 goto out_close;
36084 }
36085 @@ -532,6 +557,194 @@ out:
36086 return error;
36087 }
36088
36089 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36090 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36091 +{
36092 + unsigned long pax_flags = 0UL;
36093 +
36094 +#ifdef CONFIG_PAX_PAGEEXEC
36095 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36096 + pax_flags |= MF_PAX_PAGEEXEC;
36097 +#endif
36098 +
36099 +#ifdef CONFIG_PAX_SEGMEXEC
36100 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36101 + pax_flags |= MF_PAX_SEGMEXEC;
36102 +#endif
36103 +
36104 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36105 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36106 + if (nx_enabled)
36107 + pax_flags &= ~MF_PAX_SEGMEXEC;
36108 + else
36109 + pax_flags &= ~MF_PAX_PAGEEXEC;
36110 + }
36111 +#endif
36112 +
36113 +#ifdef CONFIG_PAX_EMUTRAMP
36114 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36115 + pax_flags |= MF_PAX_EMUTRAMP;
36116 +#endif
36117 +
36118 +#ifdef CONFIG_PAX_MPROTECT
36119 + if (elf_phdata->p_flags & PF_MPROTECT)
36120 + pax_flags |= MF_PAX_MPROTECT;
36121 +#endif
36122 +
36123 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36124 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36125 + pax_flags |= MF_PAX_RANDMMAP;
36126 +#endif
36127 +
36128 + return pax_flags;
36129 +}
36130 +#endif
36131 +
36132 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36133 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36134 +{
36135 + unsigned long pax_flags = 0UL;
36136 +
36137 +#ifdef CONFIG_PAX_PAGEEXEC
36138 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36139 + pax_flags |= MF_PAX_PAGEEXEC;
36140 +#endif
36141 +
36142 +#ifdef CONFIG_PAX_SEGMEXEC
36143 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36144 + pax_flags |= MF_PAX_SEGMEXEC;
36145 +#endif
36146 +
36147 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36148 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36149 + if (nx_enabled)
36150 + pax_flags &= ~MF_PAX_SEGMEXEC;
36151 + else
36152 + pax_flags &= ~MF_PAX_PAGEEXEC;
36153 + }
36154 +#endif
36155 +
36156 +#ifdef CONFIG_PAX_EMUTRAMP
36157 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36158 + pax_flags |= MF_PAX_EMUTRAMP;
36159 +#endif
36160 +
36161 +#ifdef CONFIG_PAX_MPROTECT
36162 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36163 + pax_flags |= MF_PAX_MPROTECT;
36164 +#endif
36165 +
36166 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36167 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36168 + pax_flags |= MF_PAX_RANDMMAP;
36169 +#endif
36170 +
36171 + return pax_flags;
36172 +}
36173 +#endif
36174 +
36175 +#ifdef CONFIG_PAX_EI_PAX
36176 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36177 +{
36178 + unsigned long pax_flags = 0UL;
36179 +
36180 +#ifdef CONFIG_PAX_PAGEEXEC
36181 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36182 + pax_flags |= MF_PAX_PAGEEXEC;
36183 +#endif
36184 +
36185 +#ifdef CONFIG_PAX_SEGMEXEC
36186 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36187 + pax_flags |= MF_PAX_SEGMEXEC;
36188 +#endif
36189 +
36190 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36191 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36192 + if (nx_enabled)
36193 + pax_flags &= ~MF_PAX_SEGMEXEC;
36194 + else
36195 + pax_flags &= ~MF_PAX_PAGEEXEC;
36196 + }
36197 +#endif
36198 +
36199 +#ifdef CONFIG_PAX_EMUTRAMP
36200 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36201 + pax_flags |= MF_PAX_EMUTRAMP;
36202 +#endif
36203 +
36204 +#ifdef CONFIG_PAX_MPROTECT
36205 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36206 + pax_flags |= MF_PAX_MPROTECT;
36207 +#endif
36208 +
36209 +#ifdef CONFIG_PAX_ASLR
36210 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36211 + pax_flags |= MF_PAX_RANDMMAP;
36212 +#endif
36213 +
36214 + return pax_flags;
36215 +}
36216 +#endif
36217 +
36218 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36219 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36220 +{
36221 + unsigned long pax_flags = 0UL;
36222 +
36223 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36224 + unsigned long i;
36225 + int found_flags = 0;
36226 +#endif
36227 +
36228 +#ifdef CONFIG_PAX_EI_PAX
36229 + pax_flags = pax_parse_ei_pax(elf_ex);
36230 +#endif
36231 +
36232 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36233 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36234 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36235 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36236 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36237 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36238 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36239 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36240 + return -EINVAL;
36241 +
36242 +#ifdef CONFIG_PAX_SOFTMODE
36243 + if (pax_softmode)
36244 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36245 + else
36246 +#endif
36247 +
36248 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36249 + found_flags = 1;
36250 + break;
36251 + }
36252 +#endif
36253 +
36254 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36255 + if (found_flags == 0) {
36256 + struct elf_phdr phdr;
36257 + memset(&phdr, 0, sizeof(phdr));
36258 + phdr.p_flags = PF_NOEMUTRAMP;
36259 +#ifdef CONFIG_PAX_SOFTMODE
36260 + if (pax_softmode)
36261 + pax_flags = pax_parse_softmode(&phdr);
36262 + else
36263 +#endif
36264 + pax_flags = pax_parse_hardmode(&phdr);
36265 + }
36266 +#endif
36267 +
36268 +
36269 + if (0 > pax_check_flags(&pax_flags))
36270 + return -EINVAL;
36271 +
36272 + current->mm->pax_flags = pax_flags;
36273 + return 0;
36274 +}
36275 +#endif
36276 +
36277 /*
36278 * These are the functions used to load ELF style executables and shared
36279 * libraries. There is no binary dependent code anywhere else.
36280 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36281 {
36282 unsigned int random_variable = 0;
36283
36284 +#ifdef CONFIG_PAX_RANDUSTACK
36285 + if (randomize_va_space)
36286 + return stack_top - current->mm->delta_stack;
36287 +#endif
36288 +
36289 if ((current->flags & PF_RANDOMIZE) &&
36290 !(current->personality & ADDR_NO_RANDOMIZE)) {
36291 random_variable = get_random_int() & STACK_RND_MASK;
36292 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36293 unsigned long load_addr = 0, load_bias = 0;
36294 int load_addr_set = 0;
36295 char * elf_interpreter = NULL;
36296 - unsigned long error;
36297 + unsigned long error = 0;
36298 struct elf_phdr *elf_ppnt, *elf_phdata;
36299 unsigned long elf_bss, elf_brk;
36300 int retval, i;
36301 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36302 unsigned long start_code, end_code, start_data, end_data;
36303 unsigned long reloc_func_desc = 0;
36304 int executable_stack = EXSTACK_DEFAULT;
36305 - unsigned long def_flags = 0;
36306 struct {
36307 struct elfhdr elf_ex;
36308 struct elfhdr interp_elf_ex;
36309 } *loc;
36310 + unsigned long pax_task_size = TASK_SIZE;
36311
36312 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36313 if (!loc) {
36314 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36315
36316 /* OK, This is the point of no return */
36317 current->flags &= ~PF_FORKNOEXEC;
36318 - current->mm->def_flags = def_flags;
36319 +
36320 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36321 + current->mm->pax_flags = 0UL;
36322 +#endif
36323 +
36324 +#ifdef CONFIG_PAX_DLRESOLVE
36325 + current->mm->call_dl_resolve = 0UL;
36326 +#endif
36327 +
36328 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36329 + current->mm->call_syscall = 0UL;
36330 +#endif
36331 +
36332 +#ifdef CONFIG_PAX_ASLR
36333 + current->mm->delta_mmap = 0UL;
36334 + current->mm->delta_stack = 0UL;
36335 +#endif
36336 +
36337 + current->mm->def_flags = 0;
36338 +
36339 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36340 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36341 + send_sig(SIGKILL, current, 0);
36342 + goto out_free_dentry;
36343 + }
36344 +#endif
36345 +
36346 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36347 + pax_set_initial_flags(bprm);
36348 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36349 + if (pax_set_initial_flags_func)
36350 + (pax_set_initial_flags_func)(bprm);
36351 +#endif
36352 +
36353 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36354 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36355 + current->mm->context.user_cs_limit = PAGE_SIZE;
36356 + current->mm->def_flags |= VM_PAGEEXEC;
36357 + }
36358 +#endif
36359 +
36360 +#ifdef CONFIG_PAX_SEGMEXEC
36361 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36362 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36363 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36364 + pax_task_size = SEGMEXEC_TASK_SIZE;
36365 + }
36366 +#endif
36367 +
36368 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36369 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36370 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36371 + put_cpu();
36372 + }
36373 +#endif
36374
36375 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36376 may depend on the personality. */
36377 SET_PERSONALITY(loc->elf_ex);
36378 +
36379 +#ifdef CONFIG_PAX_ASLR
36380 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36381 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36382 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36383 + }
36384 +#endif
36385 +
36386 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36387 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36388 + executable_stack = EXSTACK_DISABLE_X;
36389 + current->personality &= ~READ_IMPLIES_EXEC;
36390 + } else
36391 +#endif
36392 +
36393 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36394 current->personality |= READ_IMPLIES_EXEC;
36395
36396 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36397 #else
36398 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36399 #endif
36400 +
36401 +#ifdef CONFIG_PAX_RANDMMAP
36402 + /* PaX: randomize base address at the default exe base if requested */
36403 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36404 +#ifdef CONFIG_SPARC64
36405 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36406 +#else
36407 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36408 +#endif
36409 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36410 + elf_flags |= MAP_FIXED;
36411 + }
36412 +#endif
36413 +
36414 }
36415
36416 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36417 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36418 * allowed task size. Note that p_filesz must always be
36419 * <= p_memsz so it is only necessary to check p_memsz.
36420 */
36421 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36422 - elf_ppnt->p_memsz > TASK_SIZE ||
36423 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36424 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36425 + elf_ppnt->p_memsz > pax_task_size ||
36426 + pax_task_size - elf_ppnt->p_memsz < k) {
36427 /* set_brk can never work. Avoid overflows. */
36428 send_sig(SIGKILL, current, 0);
36429 retval = -EINVAL;
36430 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36431 start_data += load_bias;
36432 end_data += load_bias;
36433
36434 +#ifdef CONFIG_PAX_RANDMMAP
36435 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36436 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36437 +#endif
36438 +
36439 /* Calling set_brk effectively mmaps the pages that we need
36440 * for the bss and break sections. We must do this before
36441 * mapping in the interpreter, to make sure it doesn't wind
36442 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36443 goto out_free_dentry;
36444 }
36445 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36446 - send_sig(SIGSEGV, current, 0);
36447 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36448 - goto out_free_dentry;
36449 + /*
36450 + * This bss-zeroing can fail if the ELF
36451 + * file specifies odd protections. So
36452 + * we don't check the return value
36453 + */
36454 }
36455
36456 if (elf_interpreter) {
36457 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36458 unsigned long n = off;
36459 if (n > PAGE_SIZE)
36460 n = PAGE_SIZE;
36461 - if (!dump_write(file, buf, n))
36462 + if (!dump_write(file, buf, n)) {
36463 + free_page((unsigned long)buf);
36464 return 0;
36465 + }
36466 off -= n;
36467 }
36468 free_page((unsigned long)buf);
36469 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36470 * Decide what to dump of a segment, part, all or none.
36471 */
36472 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36473 - unsigned long mm_flags)
36474 + unsigned long mm_flags, long signr)
36475 {
36476 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36477
36478 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36479 if (vma->vm_file == NULL)
36480 return 0;
36481
36482 - if (FILTER(MAPPED_PRIVATE))
36483 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36484 goto whole;
36485
36486 /*
36487 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36488 #undef DUMP_WRITE
36489
36490 #define DUMP_WRITE(addr, nr) \
36491 + do { \
36492 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36493 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36494 - goto end_coredump;
36495 + goto end_coredump; \
36496 + } while (0);
36497
36498 static void fill_elf_header(struct elfhdr *elf, int segs,
36499 u16 machine, u32 flags, u8 osabi)
36500 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36501 {
36502 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36503 int i = 0;
36504 - do
36505 + do {
36506 i += 2;
36507 - while (auxv[i - 2] != AT_NULL);
36508 + } while (auxv[i - 2] != AT_NULL);
36509 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36510 }
36511
36512 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36513 phdr.p_offset = offset;
36514 phdr.p_vaddr = vma->vm_start;
36515 phdr.p_paddr = 0;
36516 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36517 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36518 phdr.p_memsz = vma->vm_end - vma->vm_start;
36519 offset += phdr.p_filesz;
36520 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36521 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36522 unsigned long addr;
36523 unsigned long end;
36524
36525 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36526 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36527
36528 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36529 struct page *page;
36530 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36531 page = get_dump_page(addr);
36532 if (page) {
36533 void *kaddr = kmap(page);
36534 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36535 stop = ((size += PAGE_SIZE) > limit) ||
36536 !dump_write(file, kaddr, PAGE_SIZE);
36537 kunmap(page);
36538 @@ -2042,6 +2356,97 @@ out:
36539
36540 #endif /* USE_ELF_CORE_DUMP */
36541
36542 +#ifdef CONFIG_PAX_MPROTECT
36543 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36544 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36545 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36546 + *
36547 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36548 + * basis because we want to allow the common case and not the special ones.
36549 + */
36550 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36551 +{
36552 + struct elfhdr elf_h;
36553 + struct elf_phdr elf_p;
36554 + unsigned long i;
36555 + unsigned long oldflags;
36556 + bool is_textrel_rw, is_textrel_rx, is_relro;
36557 +
36558 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36559 + return;
36560 +
36561 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36562 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36563 +
36564 +#ifdef CONFIG_PAX_ELFRELOCS
36565 + /* possible TEXTREL */
36566 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36567 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36568 +#else
36569 + is_textrel_rw = false;
36570 + is_textrel_rx = false;
36571 +#endif
36572 +
36573 + /* possible RELRO */
36574 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36575 +
36576 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36577 + return;
36578 +
36579 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36580 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36581 +
36582 +#ifdef CONFIG_PAX_ETEXECRELOCS
36583 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36584 +#else
36585 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36586 +#endif
36587 +
36588 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36589 + !elf_check_arch(&elf_h) ||
36590 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36591 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36592 + return;
36593 +
36594 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36595 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36596 + return;
36597 + switch (elf_p.p_type) {
36598 + case PT_DYNAMIC:
36599 + if (!is_textrel_rw && !is_textrel_rx)
36600 + continue;
36601 + i = 0UL;
36602 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36603 + elf_dyn dyn;
36604 +
36605 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36606 + return;
36607 + if (dyn.d_tag == DT_NULL)
36608 + return;
36609 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36610 + gr_log_textrel(vma);
36611 + if (is_textrel_rw)
36612 + vma->vm_flags |= VM_MAYWRITE;
36613 + else
36614 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36615 + vma->vm_flags &= ~VM_MAYWRITE;
36616 + return;
36617 + }
36618 + i++;
36619 + }
36620 + return;
36621 +
36622 + case PT_GNU_RELRO:
36623 + if (!is_relro)
36624 + continue;
36625 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36626 + vma->vm_flags &= ~VM_MAYWRITE;
36627 + return;
36628 + }
36629 + }
36630 +}
36631 +#endif
36632 +
36633 static int __init init_elf_binfmt(void)
36634 {
36635 return register_binfmt(&elf_format);
36636 diff -urNp linux-2.6.32.41/fs/binfmt_flat.c linux-2.6.32.41/fs/binfmt_flat.c
36637 --- linux-2.6.32.41/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36638 +++ linux-2.6.32.41/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36639 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36640 realdatastart = (unsigned long) -ENOMEM;
36641 printk("Unable to allocate RAM for process data, errno %d\n",
36642 (int)-realdatastart);
36643 + down_write(&current->mm->mmap_sem);
36644 do_munmap(current->mm, textpos, text_len);
36645 + up_write(&current->mm->mmap_sem);
36646 ret = realdatastart;
36647 goto err;
36648 }
36649 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36650 }
36651 if (IS_ERR_VALUE(result)) {
36652 printk("Unable to read data+bss, errno %d\n", (int)-result);
36653 + down_write(&current->mm->mmap_sem);
36654 do_munmap(current->mm, textpos, text_len);
36655 do_munmap(current->mm, realdatastart, data_len + extra);
36656 + up_write(&current->mm->mmap_sem);
36657 ret = result;
36658 goto err;
36659 }
36660 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36661 }
36662 if (IS_ERR_VALUE(result)) {
36663 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36664 + down_write(&current->mm->mmap_sem);
36665 do_munmap(current->mm, textpos, text_len + data_len + extra +
36666 MAX_SHARED_LIBS * sizeof(unsigned long));
36667 + up_write(&current->mm->mmap_sem);
36668 ret = result;
36669 goto err;
36670 }
36671 diff -urNp linux-2.6.32.41/fs/bio.c linux-2.6.32.41/fs/bio.c
36672 --- linux-2.6.32.41/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36673 +++ linux-2.6.32.41/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36674 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36675
36676 i = 0;
36677 while (i < bio_slab_nr) {
36678 - struct bio_slab *bslab = &bio_slabs[i];
36679 + bslab = &bio_slabs[i];
36680
36681 if (!bslab->slab && entry == -1)
36682 entry = i;
36683 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36684 const int read = bio_data_dir(bio) == READ;
36685 struct bio_map_data *bmd = bio->bi_private;
36686 int i;
36687 - char *p = bmd->sgvecs[0].iov_base;
36688 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36689
36690 __bio_for_each_segment(bvec, bio, i, 0) {
36691 char *addr = page_address(bvec->bv_page);
36692 diff -urNp linux-2.6.32.41/fs/block_dev.c linux-2.6.32.41/fs/block_dev.c
36693 --- linux-2.6.32.41/fs/block_dev.c 2011-03-27 14:31:47.000000000 -0400
36694 +++ linux-2.6.32.41/fs/block_dev.c 2011-04-17 15:56:46.000000000 -0400
36695 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36696 else if (bdev->bd_contains == bdev)
36697 res = 0; /* is a whole device which isn't held */
36698
36699 - else if (bdev->bd_contains->bd_holder == bd_claim)
36700 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36701 res = 0; /* is a partition of a device that is being partitioned */
36702 else if (bdev->bd_contains->bd_holder != NULL)
36703 res = -EBUSY; /* is a partition of a held device */
36704 diff -urNp linux-2.6.32.41/fs/btrfs/ctree.c linux-2.6.32.41/fs/btrfs/ctree.c
36705 --- linux-2.6.32.41/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36706 +++ linux-2.6.32.41/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36707 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36708 free_extent_buffer(buf);
36709 add_root_to_dirty_list(root);
36710 } else {
36711 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36712 - parent_start = parent->start;
36713 - else
36714 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36715 + if (parent)
36716 + parent_start = parent->start;
36717 + else
36718 + parent_start = 0;
36719 + } else
36720 parent_start = 0;
36721
36722 WARN_ON(trans->transid != btrfs_header_generation(parent));
36723 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36724
36725 ret = 0;
36726 if (slot == 0) {
36727 - struct btrfs_disk_key disk_key;
36728 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36729 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36730 }
36731 diff -urNp linux-2.6.32.41/fs/btrfs/disk-io.c linux-2.6.32.41/fs/btrfs/disk-io.c
36732 --- linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36733 +++ linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36734 @@ -39,7 +39,7 @@
36735 #include "tree-log.h"
36736 #include "free-space-cache.h"
36737
36738 -static struct extent_io_ops btree_extent_io_ops;
36739 +static const struct extent_io_ops btree_extent_io_ops;
36740 static void end_workqueue_fn(struct btrfs_work *work);
36741 static void free_fs_root(struct btrfs_root *root);
36742
36743 @@ -2607,7 +2607,7 @@ out:
36744 return 0;
36745 }
36746
36747 -static struct extent_io_ops btree_extent_io_ops = {
36748 +static const struct extent_io_ops btree_extent_io_ops = {
36749 .write_cache_pages_lock_hook = btree_lock_page_hook,
36750 .readpage_end_io_hook = btree_readpage_end_io_hook,
36751 .submit_bio_hook = btree_submit_bio_hook,
36752 diff -urNp linux-2.6.32.41/fs/btrfs/extent_io.h linux-2.6.32.41/fs/btrfs/extent_io.h
36753 --- linux-2.6.32.41/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36754 +++ linux-2.6.32.41/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36755 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36756 struct bio *bio, int mirror_num,
36757 unsigned long bio_flags);
36758 struct extent_io_ops {
36759 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36760 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36761 u64 start, u64 end, int *page_started,
36762 unsigned long *nr_written);
36763 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36764 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36765 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36766 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36767 extent_submit_bio_hook_t *submit_bio_hook;
36768 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
36769 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36770 size_t size, struct bio *bio,
36771 unsigned long bio_flags);
36772 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36773 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36774 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36775 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36776 u64 start, u64 end,
36777 struct extent_state *state);
36778 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36779 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36780 u64 start, u64 end,
36781 struct extent_state *state);
36782 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36783 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36784 struct extent_state *state);
36785 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36786 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36787 struct extent_state *state, int uptodate);
36788 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36789 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36790 unsigned long old, unsigned long bits);
36791 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36792 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36793 unsigned long bits);
36794 - int (*merge_extent_hook)(struct inode *inode,
36795 + int (* const merge_extent_hook)(struct inode *inode,
36796 struct extent_state *new,
36797 struct extent_state *other);
36798 - int (*split_extent_hook)(struct inode *inode,
36799 + int (* const split_extent_hook)(struct inode *inode,
36800 struct extent_state *orig, u64 split);
36801 - int (*write_cache_pages_lock_hook)(struct page *page);
36802 + int (* const write_cache_pages_lock_hook)(struct page *page);
36803 };
36804
36805 struct extent_io_tree {
36806 @@ -88,7 +88,7 @@ struct extent_io_tree {
36807 u64 dirty_bytes;
36808 spinlock_t lock;
36809 spinlock_t buffer_lock;
36810 - struct extent_io_ops *ops;
36811 + const struct extent_io_ops *ops;
36812 };
36813
36814 struct extent_state {
36815 diff -urNp linux-2.6.32.41/fs/btrfs/extent-tree.c linux-2.6.32.41/fs/btrfs/extent-tree.c
36816 --- linux-2.6.32.41/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
36817 +++ linux-2.6.32.41/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
36818 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
36819 u64 group_start = group->key.objectid;
36820 new_extents = kmalloc(sizeof(*new_extents),
36821 GFP_NOFS);
36822 + if (!new_extents) {
36823 + ret = -ENOMEM;
36824 + goto out;
36825 + }
36826 nr_extents = 1;
36827 ret = get_new_locations(reloc_inode,
36828 extent_key,
36829 diff -urNp linux-2.6.32.41/fs/btrfs/free-space-cache.c linux-2.6.32.41/fs/btrfs/free-space-cache.c
36830 --- linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36831 +++ linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36832 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36833
36834 while(1) {
36835 if (entry->bytes < bytes || entry->offset < min_start) {
36836 - struct rb_node *node;
36837 -
36838 node = rb_next(&entry->offset_index);
36839 if (!node)
36840 break;
36841 @@ -1226,7 +1224,7 @@ again:
36842 */
36843 while (entry->bitmap || found_bitmap ||
36844 (!entry->bitmap && entry->bytes < min_bytes)) {
36845 - struct rb_node *node = rb_next(&entry->offset_index);
36846 + node = rb_next(&entry->offset_index);
36847
36848 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36849 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36850 diff -urNp linux-2.6.32.41/fs/btrfs/inode.c linux-2.6.32.41/fs/btrfs/inode.c
36851 --- linux-2.6.32.41/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36852 +++ linux-2.6.32.41/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
36853 @@ -63,7 +63,7 @@ static const struct inode_operations btr
36854 static const struct address_space_operations btrfs_aops;
36855 static const struct address_space_operations btrfs_symlink_aops;
36856 static const struct file_operations btrfs_dir_file_operations;
36857 -static struct extent_io_ops btrfs_extent_io_ops;
36858 +static const struct extent_io_ops btrfs_extent_io_ops;
36859
36860 static struct kmem_cache *btrfs_inode_cachep;
36861 struct kmem_cache *btrfs_trans_handle_cachep;
36862 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
36863 1, 0, NULL, GFP_NOFS);
36864 while (start < end) {
36865 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
36866 + BUG_ON(!async_cow);
36867 async_cow->inode = inode;
36868 async_cow->root = root;
36869 async_cow->locked_page = locked_page;
36870 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
36871 inline_size = btrfs_file_extent_inline_item_len(leaf,
36872 btrfs_item_nr(leaf, path->slots[0]));
36873 tmp = kmalloc(inline_size, GFP_NOFS);
36874 + if (!tmp)
36875 + return -ENOMEM;
36876 ptr = btrfs_file_extent_inline_start(item);
36877
36878 read_extent_buffer(leaf, tmp, ptr, inline_size);
36879 @@ -5410,7 +5413,7 @@ fail:
36880 return -ENOMEM;
36881 }
36882
36883 -static int btrfs_getattr(struct vfsmount *mnt,
36884 +int btrfs_getattr(struct vfsmount *mnt,
36885 struct dentry *dentry, struct kstat *stat)
36886 {
36887 struct inode *inode = dentry->d_inode;
36888 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
36889 return 0;
36890 }
36891
36892 +EXPORT_SYMBOL(btrfs_getattr);
36893 +
36894 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
36895 +{
36896 + return BTRFS_I(inode)->root->anon_super.s_dev;
36897 +}
36898 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
36899 +
36900 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
36901 struct inode *new_dir, struct dentry *new_dentry)
36902 {
36903 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
36904 .fsync = btrfs_sync_file,
36905 };
36906
36907 -static struct extent_io_ops btrfs_extent_io_ops = {
36908 +static const struct extent_io_ops btrfs_extent_io_ops = {
36909 .fill_delalloc = run_delalloc_range,
36910 .submit_bio_hook = btrfs_submit_bio_hook,
36911 .merge_bio_hook = btrfs_merge_bio_hook,
36912 diff -urNp linux-2.6.32.41/fs/btrfs/relocation.c linux-2.6.32.41/fs/btrfs/relocation.c
36913 --- linux-2.6.32.41/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
36914 +++ linux-2.6.32.41/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
36915 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
36916 }
36917 spin_unlock(&rc->reloc_root_tree.lock);
36918
36919 - BUG_ON((struct btrfs_root *)node->data != root);
36920 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
36921
36922 if (!del) {
36923 spin_lock(&rc->reloc_root_tree.lock);
36924 diff -urNp linux-2.6.32.41/fs/btrfs/sysfs.c linux-2.6.32.41/fs/btrfs/sysfs.c
36925 --- linux-2.6.32.41/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
36926 +++ linux-2.6.32.41/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
36927 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
36928 complete(&root->kobj_unregister);
36929 }
36930
36931 -static struct sysfs_ops btrfs_super_attr_ops = {
36932 +static const struct sysfs_ops btrfs_super_attr_ops = {
36933 .show = btrfs_super_attr_show,
36934 .store = btrfs_super_attr_store,
36935 };
36936
36937 -static struct sysfs_ops btrfs_root_attr_ops = {
36938 +static const struct sysfs_ops btrfs_root_attr_ops = {
36939 .show = btrfs_root_attr_show,
36940 .store = btrfs_root_attr_store,
36941 };
36942 diff -urNp linux-2.6.32.41/fs/buffer.c linux-2.6.32.41/fs/buffer.c
36943 --- linux-2.6.32.41/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
36944 +++ linux-2.6.32.41/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
36945 @@ -25,6 +25,7 @@
36946 #include <linux/percpu.h>
36947 #include <linux/slab.h>
36948 #include <linux/capability.h>
36949 +#include <linux/security.h>
36950 #include <linux/blkdev.h>
36951 #include <linux/file.h>
36952 #include <linux/quotaops.h>
36953 diff -urNp linux-2.6.32.41/fs/cachefiles/bind.c linux-2.6.32.41/fs/cachefiles/bind.c
36954 --- linux-2.6.32.41/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
36955 +++ linux-2.6.32.41/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
36956 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
36957 args);
36958
36959 /* start by checking things over */
36960 - ASSERT(cache->fstop_percent >= 0 &&
36961 - cache->fstop_percent < cache->fcull_percent &&
36962 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
36963 cache->fcull_percent < cache->frun_percent &&
36964 cache->frun_percent < 100);
36965
36966 - ASSERT(cache->bstop_percent >= 0 &&
36967 - cache->bstop_percent < cache->bcull_percent &&
36968 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
36969 cache->bcull_percent < cache->brun_percent &&
36970 cache->brun_percent < 100);
36971
36972 diff -urNp linux-2.6.32.41/fs/cachefiles/daemon.c linux-2.6.32.41/fs/cachefiles/daemon.c
36973 --- linux-2.6.32.41/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
36974 +++ linux-2.6.32.41/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
36975 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
36976 if (test_bit(CACHEFILES_DEAD, &cache->flags))
36977 return -EIO;
36978
36979 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
36980 + if (datalen > PAGE_SIZE - 1)
36981 return -EOPNOTSUPP;
36982
36983 /* drag the command string into the kernel so we can parse it */
36984 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
36985 if (args[0] != '%' || args[1] != '\0')
36986 return -EINVAL;
36987
36988 - if (fstop < 0 || fstop >= cache->fcull_percent)
36989 + if (fstop >= cache->fcull_percent)
36990 return cachefiles_daemon_range_error(cache, args);
36991
36992 cache->fstop_percent = fstop;
36993 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
36994 if (args[0] != '%' || args[1] != '\0')
36995 return -EINVAL;
36996
36997 - if (bstop < 0 || bstop >= cache->bcull_percent)
36998 + if (bstop >= cache->bcull_percent)
36999 return cachefiles_daemon_range_error(cache, args);
37000
37001 cache->bstop_percent = bstop;
37002 diff -urNp linux-2.6.32.41/fs/cachefiles/internal.h linux-2.6.32.41/fs/cachefiles/internal.h
37003 --- linux-2.6.32.41/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37004 +++ linux-2.6.32.41/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37005 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37006 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37007 struct rb_root active_nodes; /* active nodes (can't be culled) */
37008 rwlock_t active_lock; /* lock for active_nodes */
37009 - atomic_t gravecounter; /* graveyard uniquifier */
37010 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37011 unsigned frun_percent; /* when to stop culling (% files) */
37012 unsigned fcull_percent; /* when to start culling (% files) */
37013 unsigned fstop_percent; /* when to stop allocating (% files) */
37014 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37015 * proc.c
37016 */
37017 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37018 -extern atomic_t cachefiles_lookup_histogram[HZ];
37019 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37020 -extern atomic_t cachefiles_create_histogram[HZ];
37021 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37022 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37023 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37024
37025 extern int __init cachefiles_proc_init(void);
37026 extern void cachefiles_proc_cleanup(void);
37027 static inline
37028 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37029 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37030 {
37031 unsigned long jif = jiffies - start_jif;
37032 if (jif >= HZ)
37033 jif = HZ - 1;
37034 - atomic_inc(&histogram[jif]);
37035 + atomic_inc_unchecked(&histogram[jif]);
37036 }
37037
37038 #else
37039 diff -urNp linux-2.6.32.41/fs/cachefiles/namei.c linux-2.6.32.41/fs/cachefiles/namei.c
37040 --- linux-2.6.32.41/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37041 +++ linux-2.6.32.41/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37042 @@ -250,7 +250,7 @@ try_again:
37043 /* first step is to make up a grave dentry in the graveyard */
37044 sprintf(nbuffer, "%08x%08x",
37045 (uint32_t) get_seconds(),
37046 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37047 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37048
37049 /* do the multiway lock magic */
37050 trap = lock_rename(cache->graveyard, dir);
37051 diff -urNp linux-2.6.32.41/fs/cachefiles/proc.c linux-2.6.32.41/fs/cachefiles/proc.c
37052 --- linux-2.6.32.41/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37053 +++ linux-2.6.32.41/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37054 @@ -14,9 +14,9 @@
37055 #include <linux/seq_file.h>
37056 #include "internal.h"
37057
37058 -atomic_t cachefiles_lookup_histogram[HZ];
37059 -atomic_t cachefiles_mkdir_histogram[HZ];
37060 -atomic_t cachefiles_create_histogram[HZ];
37061 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37062 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37063 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37064
37065 /*
37066 * display the latency histogram
37067 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37068 return 0;
37069 default:
37070 index = (unsigned long) v - 3;
37071 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37072 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37073 - z = atomic_read(&cachefiles_create_histogram[index]);
37074 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37075 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37076 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37077 if (x == 0 && y == 0 && z == 0)
37078 return 0;
37079
37080 diff -urNp linux-2.6.32.41/fs/cachefiles/rdwr.c linux-2.6.32.41/fs/cachefiles/rdwr.c
37081 --- linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37082 +++ linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37083 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37084 old_fs = get_fs();
37085 set_fs(KERNEL_DS);
37086 ret = file->f_op->write(
37087 - file, (const void __user *) data, len, &pos);
37088 + file, (__force const void __user *) data, len, &pos);
37089 set_fs(old_fs);
37090 kunmap(page);
37091 if (ret != len)
37092 diff -urNp linux-2.6.32.41/fs/cifs/cifs_debug.c linux-2.6.32.41/fs/cifs/cifs_debug.c
37093 --- linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37094 +++ linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37095 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37096 tcon = list_entry(tmp3,
37097 struct cifsTconInfo,
37098 tcon_list);
37099 - atomic_set(&tcon->num_smbs_sent, 0);
37100 - atomic_set(&tcon->num_writes, 0);
37101 - atomic_set(&tcon->num_reads, 0);
37102 - atomic_set(&tcon->num_oplock_brks, 0);
37103 - atomic_set(&tcon->num_opens, 0);
37104 - atomic_set(&tcon->num_posixopens, 0);
37105 - atomic_set(&tcon->num_posixmkdirs, 0);
37106 - atomic_set(&tcon->num_closes, 0);
37107 - atomic_set(&tcon->num_deletes, 0);
37108 - atomic_set(&tcon->num_mkdirs, 0);
37109 - atomic_set(&tcon->num_rmdirs, 0);
37110 - atomic_set(&tcon->num_renames, 0);
37111 - atomic_set(&tcon->num_t2renames, 0);
37112 - atomic_set(&tcon->num_ffirst, 0);
37113 - atomic_set(&tcon->num_fnext, 0);
37114 - atomic_set(&tcon->num_fclose, 0);
37115 - atomic_set(&tcon->num_hardlinks, 0);
37116 - atomic_set(&tcon->num_symlinks, 0);
37117 - atomic_set(&tcon->num_locks, 0);
37118 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37119 + atomic_set_unchecked(&tcon->num_writes, 0);
37120 + atomic_set_unchecked(&tcon->num_reads, 0);
37121 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37122 + atomic_set_unchecked(&tcon->num_opens, 0);
37123 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37124 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37125 + atomic_set_unchecked(&tcon->num_closes, 0);
37126 + atomic_set_unchecked(&tcon->num_deletes, 0);
37127 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37128 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37129 + atomic_set_unchecked(&tcon->num_renames, 0);
37130 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37131 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37132 + atomic_set_unchecked(&tcon->num_fnext, 0);
37133 + atomic_set_unchecked(&tcon->num_fclose, 0);
37134 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37135 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37136 + atomic_set_unchecked(&tcon->num_locks, 0);
37137 }
37138 }
37139 }
37140 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37141 if (tcon->need_reconnect)
37142 seq_puts(m, "\tDISCONNECTED ");
37143 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37144 - atomic_read(&tcon->num_smbs_sent),
37145 - atomic_read(&tcon->num_oplock_brks));
37146 + atomic_read_unchecked(&tcon->num_smbs_sent),
37147 + atomic_read_unchecked(&tcon->num_oplock_brks));
37148 seq_printf(m, "\nReads: %d Bytes: %lld",
37149 - atomic_read(&tcon->num_reads),
37150 + atomic_read_unchecked(&tcon->num_reads),
37151 (long long)(tcon->bytes_read));
37152 seq_printf(m, "\nWrites: %d Bytes: %lld",
37153 - atomic_read(&tcon->num_writes),
37154 + atomic_read_unchecked(&tcon->num_writes),
37155 (long long)(tcon->bytes_written));
37156 seq_printf(m, "\nFlushes: %d",
37157 - atomic_read(&tcon->num_flushes));
37158 + atomic_read_unchecked(&tcon->num_flushes));
37159 seq_printf(m, "\nLocks: %d HardLinks: %d "
37160 "Symlinks: %d",
37161 - atomic_read(&tcon->num_locks),
37162 - atomic_read(&tcon->num_hardlinks),
37163 - atomic_read(&tcon->num_symlinks));
37164 + atomic_read_unchecked(&tcon->num_locks),
37165 + atomic_read_unchecked(&tcon->num_hardlinks),
37166 + atomic_read_unchecked(&tcon->num_symlinks));
37167 seq_printf(m, "\nOpens: %d Closes: %d "
37168 "Deletes: %d",
37169 - atomic_read(&tcon->num_opens),
37170 - atomic_read(&tcon->num_closes),
37171 - atomic_read(&tcon->num_deletes));
37172 + atomic_read_unchecked(&tcon->num_opens),
37173 + atomic_read_unchecked(&tcon->num_closes),
37174 + atomic_read_unchecked(&tcon->num_deletes));
37175 seq_printf(m, "\nPosix Opens: %d "
37176 "Posix Mkdirs: %d",
37177 - atomic_read(&tcon->num_posixopens),
37178 - atomic_read(&tcon->num_posixmkdirs));
37179 + atomic_read_unchecked(&tcon->num_posixopens),
37180 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37181 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37182 - atomic_read(&tcon->num_mkdirs),
37183 - atomic_read(&tcon->num_rmdirs));
37184 + atomic_read_unchecked(&tcon->num_mkdirs),
37185 + atomic_read_unchecked(&tcon->num_rmdirs));
37186 seq_printf(m, "\nRenames: %d T2 Renames %d",
37187 - atomic_read(&tcon->num_renames),
37188 - atomic_read(&tcon->num_t2renames));
37189 + atomic_read_unchecked(&tcon->num_renames),
37190 + atomic_read_unchecked(&tcon->num_t2renames));
37191 seq_printf(m, "\nFindFirst: %d FNext %d "
37192 "FClose %d",
37193 - atomic_read(&tcon->num_ffirst),
37194 - atomic_read(&tcon->num_fnext),
37195 - atomic_read(&tcon->num_fclose));
37196 + atomic_read_unchecked(&tcon->num_ffirst),
37197 + atomic_read_unchecked(&tcon->num_fnext),
37198 + atomic_read_unchecked(&tcon->num_fclose));
37199 }
37200 }
37201 }
37202 diff -urNp linux-2.6.32.41/fs/cifs/cifsglob.h linux-2.6.32.41/fs/cifs/cifsglob.h
37203 --- linux-2.6.32.41/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37204 +++ linux-2.6.32.41/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37205 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37206 __u16 Flags; /* optional support bits */
37207 enum statusEnum tidStatus;
37208 #ifdef CONFIG_CIFS_STATS
37209 - atomic_t num_smbs_sent;
37210 - atomic_t num_writes;
37211 - atomic_t num_reads;
37212 - atomic_t num_flushes;
37213 - atomic_t num_oplock_brks;
37214 - atomic_t num_opens;
37215 - atomic_t num_closes;
37216 - atomic_t num_deletes;
37217 - atomic_t num_mkdirs;
37218 - atomic_t num_posixopens;
37219 - atomic_t num_posixmkdirs;
37220 - atomic_t num_rmdirs;
37221 - atomic_t num_renames;
37222 - atomic_t num_t2renames;
37223 - atomic_t num_ffirst;
37224 - atomic_t num_fnext;
37225 - atomic_t num_fclose;
37226 - atomic_t num_hardlinks;
37227 - atomic_t num_symlinks;
37228 - atomic_t num_locks;
37229 - atomic_t num_acl_get;
37230 - atomic_t num_acl_set;
37231 + atomic_unchecked_t num_smbs_sent;
37232 + atomic_unchecked_t num_writes;
37233 + atomic_unchecked_t num_reads;
37234 + atomic_unchecked_t num_flushes;
37235 + atomic_unchecked_t num_oplock_brks;
37236 + atomic_unchecked_t num_opens;
37237 + atomic_unchecked_t num_closes;
37238 + atomic_unchecked_t num_deletes;
37239 + atomic_unchecked_t num_mkdirs;
37240 + atomic_unchecked_t num_posixopens;
37241 + atomic_unchecked_t num_posixmkdirs;
37242 + atomic_unchecked_t num_rmdirs;
37243 + atomic_unchecked_t num_renames;
37244 + atomic_unchecked_t num_t2renames;
37245 + atomic_unchecked_t num_ffirst;
37246 + atomic_unchecked_t num_fnext;
37247 + atomic_unchecked_t num_fclose;
37248 + atomic_unchecked_t num_hardlinks;
37249 + atomic_unchecked_t num_symlinks;
37250 + atomic_unchecked_t num_locks;
37251 + atomic_unchecked_t num_acl_get;
37252 + atomic_unchecked_t num_acl_set;
37253 #ifdef CONFIG_CIFS_STATS2
37254 unsigned long long time_writes;
37255 unsigned long long time_reads;
37256 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37257 }
37258
37259 #ifdef CONFIG_CIFS_STATS
37260 -#define cifs_stats_inc atomic_inc
37261 +#define cifs_stats_inc atomic_inc_unchecked
37262
37263 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37264 unsigned int bytes)
37265 diff -urNp linux-2.6.32.41/fs/cifs/link.c linux-2.6.32.41/fs/cifs/link.c
37266 --- linux-2.6.32.41/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37267 +++ linux-2.6.32.41/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37268 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37269
37270 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37271 {
37272 - char *p = nd_get_link(nd);
37273 + const char *p = nd_get_link(nd);
37274 if (!IS_ERR(p))
37275 kfree(p);
37276 }
37277 diff -urNp linux-2.6.32.41/fs/coda/cache.c linux-2.6.32.41/fs/coda/cache.c
37278 --- linux-2.6.32.41/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37279 +++ linux-2.6.32.41/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37280 @@ -24,14 +24,14 @@
37281 #include <linux/coda_fs_i.h>
37282 #include <linux/coda_cache.h>
37283
37284 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37285 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37286
37287 /* replace or extend an acl cache hit */
37288 void coda_cache_enter(struct inode *inode, int mask)
37289 {
37290 struct coda_inode_info *cii = ITOC(inode);
37291
37292 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37293 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37294 if (cii->c_uid != current_fsuid()) {
37295 cii->c_uid = current_fsuid();
37296 cii->c_cached_perm = mask;
37297 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37298 void coda_cache_clear_inode(struct inode *inode)
37299 {
37300 struct coda_inode_info *cii = ITOC(inode);
37301 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37302 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37303 }
37304
37305 /* remove all acl caches */
37306 void coda_cache_clear_all(struct super_block *sb)
37307 {
37308 - atomic_inc(&permission_epoch);
37309 + atomic_inc_unchecked(&permission_epoch);
37310 }
37311
37312
37313 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37314
37315 hit = (mask & cii->c_cached_perm) == mask &&
37316 cii->c_uid == current_fsuid() &&
37317 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37318 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37319
37320 return hit;
37321 }
37322 diff -urNp linux-2.6.32.41/fs/compat_binfmt_elf.c linux-2.6.32.41/fs/compat_binfmt_elf.c
37323 --- linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37324 +++ linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37325 @@ -29,10 +29,12 @@
37326 #undef elfhdr
37327 #undef elf_phdr
37328 #undef elf_note
37329 +#undef elf_dyn
37330 #undef elf_addr_t
37331 #define elfhdr elf32_hdr
37332 #define elf_phdr elf32_phdr
37333 #define elf_note elf32_note
37334 +#define elf_dyn Elf32_Dyn
37335 #define elf_addr_t Elf32_Addr
37336
37337 /*
37338 diff -urNp linux-2.6.32.41/fs/compat.c linux-2.6.32.41/fs/compat.c
37339 --- linux-2.6.32.41/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37340 +++ linux-2.6.32.41/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37341 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37342
37343 struct compat_readdir_callback {
37344 struct compat_old_linux_dirent __user *dirent;
37345 + struct file * file;
37346 int result;
37347 };
37348
37349 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37350 buf->result = -EOVERFLOW;
37351 return -EOVERFLOW;
37352 }
37353 +
37354 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37355 + return 0;
37356 +
37357 buf->result++;
37358 dirent = buf->dirent;
37359 if (!access_ok(VERIFY_WRITE, dirent,
37360 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37361
37362 buf.result = 0;
37363 buf.dirent = dirent;
37364 + buf.file = file;
37365
37366 error = vfs_readdir(file, compat_fillonedir, &buf);
37367 if (buf.result)
37368 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37369 struct compat_getdents_callback {
37370 struct compat_linux_dirent __user *current_dir;
37371 struct compat_linux_dirent __user *previous;
37372 + struct file * file;
37373 int count;
37374 int error;
37375 };
37376 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37377 buf->error = -EOVERFLOW;
37378 return -EOVERFLOW;
37379 }
37380 +
37381 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37382 + return 0;
37383 +
37384 dirent = buf->previous;
37385 if (dirent) {
37386 if (__put_user(offset, &dirent->d_off))
37387 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37388 buf.previous = NULL;
37389 buf.count = count;
37390 buf.error = 0;
37391 + buf.file = file;
37392
37393 error = vfs_readdir(file, compat_filldir, &buf);
37394 if (error >= 0)
37395 @@ -987,6 +999,7 @@ out:
37396 struct compat_getdents_callback64 {
37397 struct linux_dirent64 __user *current_dir;
37398 struct linux_dirent64 __user *previous;
37399 + struct file * file;
37400 int count;
37401 int error;
37402 };
37403 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37404 buf->error = -EINVAL; /* only used if we fail.. */
37405 if (reclen > buf->count)
37406 return -EINVAL;
37407 +
37408 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37409 + return 0;
37410 +
37411 dirent = buf->previous;
37412
37413 if (dirent) {
37414 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37415 buf.previous = NULL;
37416 buf.count = count;
37417 buf.error = 0;
37418 + buf.file = file;
37419
37420 error = vfs_readdir(file, compat_filldir64, &buf);
37421 if (error >= 0)
37422 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37423 * verify all the pointers
37424 */
37425 ret = -EINVAL;
37426 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37427 + if (nr_segs > UIO_MAXIOV)
37428 goto out;
37429 if (!file->f_op)
37430 goto out;
37431 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37432 compat_uptr_t __user *envp,
37433 struct pt_regs * regs)
37434 {
37435 +#ifdef CONFIG_GRKERNSEC
37436 + struct file *old_exec_file;
37437 + struct acl_subject_label *old_acl;
37438 + struct rlimit old_rlim[RLIM_NLIMITS];
37439 +#endif
37440 struct linux_binprm *bprm;
37441 struct file *file;
37442 struct files_struct *displaced;
37443 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37444 bprm->filename = filename;
37445 bprm->interp = filename;
37446
37447 + if (gr_process_user_ban()) {
37448 + retval = -EPERM;
37449 + goto out_file;
37450 + }
37451 +
37452 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37453 + retval = -EAGAIN;
37454 + if (gr_handle_nproc())
37455 + goto out_file;
37456 + retval = -EACCES;
37457 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37458 + goto out_file;
37459 +
37460 retval = bprm_mm_init(bprm);
37461 if (retval)
37462 goto out_file;
37463 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37464 if (retval < 0)
37465 goto out;
37466
37467 + if (!gr_tpe_allow(file)) {
37468 + retval = -EACCES;
37469 + goto out;
37470 + }
37471 +
37472 + if (gr_check_crash_exec(file)) {
37473 + retval = -EACCES;
37474 + goto out;
37475 + }
37476 +
37477 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37478 +
37479 + gr_handle_exec_args_compat(bprm, argv);
37480 +
37481 +#ifdef CONFIG_GRKERNSEC
37482 + old_acl = current->acl;
37483 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37484 + old_exec_file = current->exec_file;
37485 + get_file(file);
37486 + current->exec_file = file;
37487 +#endif
37488 +
37489 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37490 + bprm->unsafe & LSM_UNSAFE_SHARE);
37491 + if (retval < 0)
37492 + goto out_fail;
37493 +
37494 retval = search_binary_handler(bprm, regs);
37495 if (retval < 0)
37496 - goto out;
37497 + goto out_fail;
37498 +#ifdef CONFIG_GRKERNSEC
37499 + if (old_exec_file)
37500 + fput(old_exec_file);
37501 +#endif
37502
37503 /* execve succeeded */
37504 current->fs->in_exec = 0;
37505 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37506 put_files_struct(displaced);
37507 return retval;
37508
37509 +out_fail:
37510 +#ifdef CONFIG_GRKERNSEC
37511 + current->acl = old_acl;
37512 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37513 + fput(current->exec_file);
37514 + current->exec_file = old_exec_file;
37515 +#endif
37516 +
37517 out:
37518 if (bprm->mm) {
37519 acct_arg_size(bprm, 0);
37520 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37521 struct fdtable *fdt;
37522 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37523
37524 + pax_track_stack();
37525 +
37526 if (n < 0)
37527 goto out_nofds;
37528
37529 diff -urNp linux-2.6.32.41/fs/compat_ioctl.c linux-2.6.32.41/fs/compat_ioctl.c
37530 --- linux-2.6.32.41/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37531 +++ linux-2.6.32.41/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37532 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37533 up = (struct compat_video_spu_palette __user *) arg;
37534 err = get_user(palp, &up->palette);
37535 err |= get_user(length, &up->length);
37536 + if (err)
37537 + return -EFAULT;
37538
37539 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37540 err = put_user(compat_ptr(palp), &up_native->palette);
37541 diff -urNp linux-2.6.32.41/fs/configfs/dir.c linux-2.6.32.41/fs/configfs/dir.c
37542 --- linux-2.6.32.41/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37543 +++ linux-2.6.32.41/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37544 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37545 }
37546 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37547 struct configfs_dirent *next;
37548 - const char * name;
37549 + const unsigned char * name;
37550 + char d_name[sizeof(next->s_dentry->d_iname)];
37551 int len;
37552
37553 next = list_entry(p, struct configfs_dirent,
37554 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37555 continue;
37556
37557 name = configfs_get_name(next);
37558 - len = strlen(name);
37559 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37560 + len = next->s_dentry->d_name.len;
37561 + memcpy(d_name, name, len);
37562 + name = d_name;
37563 + } else
37564 + len = strlen(name);
37565 if (next->s_dentry)
37566 ino = next->s_dentry->d_inode->i_ino;
37567 else
37568 diff -urNp linux-2.6.32.41/fs/dcache.c linux-2.6.32.41/fs/dcache.c
37569 --- linux-2.6.32.41/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37570 +++ linux-2.6.32.41/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37571 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37572
37573 static struct kmem_cache *dentry_cache __read_mostly;
37574
37575 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37576 -
37577 /*
37578 * This is the single most critical data structure when it comes
37579 * to the dcache: the hashtable for lookups. Somebody should try
37580 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37581 mempages -= reserve;
37582
37583 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37584 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37585 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37586
37587 dcache_init();
37588 inode_init();
37589 diff -urNp linux-2.6.32.41/fs/dlm/lockspace.c linux-2.6.32.41/fs/dlm/lockspace.c
37590 --- linux-2.6.32.41/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37591 +++ linux-2.6.32.41/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37592 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37593 kfree(ls);
37594 }
37595
37596 -static struct sysfs_ops dlm_attr_ops = {
37597 +static const struct sysfs_ops dlm_attr_ops = {
37598 .show = dlm_attr_show,
37599 .store = dlm_attr_store,
37600 };
37601 diff -urNp linux-2.6.32.41/fs/ecryptfs/inode.c linux-2.6.32.41/fs/ecryptfs/inode.c
37602 --- linux-2.6.32.41/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37603 +++ linux-2.6.32.41/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37604 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37605 old_fs = get_fs();
37606 set_fs(get_ds());
37607 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37608 - (char __user *)lower_buf,
37609 + (__force char __user *)lower_buf,
37610 lower_bufsiz);
37611 set_fs(old_fs);
37612 if (rc < 0)
37613 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37614 }
37615 old_fs = get_fs();
37616 set_fs(get_ds());
37617 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37618 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37619 set_fs(old_fs);
37620 if (rc < 0)
37621 goto out_free;
37622 diff -urNp linux-2.6.32.41/fs/exec.c linux-2.6.32.41/fs/exec.c
37623 --- linux-2.6.32.41/fs/exec.c 2011-04-17 17:00:52.000000000 -0400
37624 +++ linux-2.6.32.41/fs/exec.c 2011-06-04 20:41:36.000000000 -0400
37625 @@ -56,12 +56,24 @@
37626 #include <linux/fsnotify.h>
37627 #include <linux/fs_struct.h>
37628 #include <linux/pipe_fs_i.h>
37629 +#include <linux/random.h>
37630 +#include <linux/seq_file.h>
37631 +
37632 +#ifdef CONFIG_PAX_REFCOUNT
37633 +#include <linux/kallsyms.h>
37634 +#include <linux/kdebug.h>
37635 +#endif
37636
37637 #include <asm/uaccess.h>
37638 #include <asm/mmu_context.h>
37639 #include <asm/tlb.h>
37640 #include "internal.h"
37641
37642 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37643 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37644 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37645 +#endif
37646 +
37647 int core_uses_pid;
37648 char core_pattern[CORENAME_MAX_SIZE] = "core";
37649 unsigned int core_pipe_limit;
37650 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37651 goto out;
37652
37653 file = do_filp_open(AT_FDCWD, tmp,
37654 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37655 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37656 MAY_READ | MAY_EXEC | MAY_OPEN);
37657 putname(tmp);
37658 error = PTR_ERR(file);
37659 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37660 int write)
37661 {
37662 struct page *page;
37663 - int ret;
37664
37665 -#ifdef CONFIG_STACK_GROWSUP
37666 - if (write) {
37667 - ret = expand_stack_downwards(bprm->vma, pos);
37668 - if (ret < 0)
37669 - return NULL;
37670 - }
37671 -#endif
37672 - ret = get_user_pages(current, bprm->mm, pos,
37673 - 1, write, 1, &page, NULL);
37674 - if (ret <= 0)
37675 + if (0 > expand_stack_downwards(bprm->vma, pos))
37676 + return NULL;
37677 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37678 return NULL;
37679
37680 if (write) {
37681 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37682 vma->vm_end = STACK_TOP_MAX;
37683 vma->vm_start = vma->vm_end - PAGE_SIZE;
37684 vma->vm_flags = VM_STACK_FLAGS;
37685 +
37686 +#ifdef CONFIG_PAX_SEGMEXEC
37687 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37688 +#endif
37689 +
37690 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37691
37692 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37693 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37694 mm->stack_vm = mm->total_vm = 1;
37695 up_write(&mm->mmap_sem);
37696 bprm->p = vma->vm_end - sizeof(void *);
37697 +
37698 +#ifdef CONFIG_PAX_RANDUSTACK
37699 + if (randomize_va_space)
37700 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37701 +#endif
37702 +
37703 return 0;
37704 err:
37705 up_write(&mm->mmap_sem);
37706 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37707 int r;
37708 mm_segment_t oldfs = get_fs();
37709 set_fs(KERNEL_DS);
37710 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37711 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37712 set_fs(oldfs);
37713 return r;
37714 }
37715 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37716 unsigned long new_end = old_end - shift;
37717 struct mmu_gather *tlb;
37718
37719 - BUG_ON(new_start > new_end);
37720 + if (new_start >= new_end || new_start < mmap_min_addr)
37721 + return -ENOMEM;
37722
37723 /*
37724 * ensure there are no vmas between where we want to go
37725 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37726 if (vma != find_vma(mm, new_start))
37727 return -EFAULT;
37728
37729 +#ifdef CONFIG_PAX_SEGMEXEC
37730 + BUG_ON(pax_find_mirror_vma(vma));
37731 +#endif
37732 +
37733 /*
37734 * cover the whole range: [new_start, old_end)
37735 */
37736 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37737 stack_top = arch_align_stack(stack_top);
37738 stack_top = PAGE_ALIGN(stack_top);
37739
37740 - if (unlikely(stack_top < mmap_min_addr) ||
37741 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37742 - return -ENOMEM;
37743 -
37744 stack_shift = vma->vm_end - stack_top;
37745
37746 bprm->p -= stack_shift;
37747 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37748 bprm->exec -= stack_shift;
37749
37750 down_write(&mm->mmap_sem);
37751 +
37752 + /* Move stack pages down in memory. */
37753 + if (stack_shift) {
37754 + ret = shift_arg_pages(vma, stack_shift);
37755 + if (ret)
37756 + goto out_unlock;
37757 + }
37758 +
37759 vm_flags = VM_STACK_FLAGS;
37760
37761 /*
37762 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37763 vm_flags &= ~VM_EXEC;
37764 vm_flags |= mm->def_flags;
37765
37766 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37767 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37768 + vm_flags &= ~VM_EXEC;
37769 +
37770 +#ifdef CONFIG_PAX_MPROTECT
37771 + if (mm->pax_flags & MF_PAX_MPROTECT)
37772 + vm_flags &= ~VM_MAYEXEC;
37773 +#endif
37774 +
37775 + }
37776 +#endif
37777 +
37778 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37779 vm_flags);
37780 if (ret)
37781 goto out_unlock;
37782 BUG_ON(prev != vma);
37783
37784 - /* Move stack pages down in memory. */
37785 - if (stack_shift) {
37786 - ret = shift_arg_pages(vma, stack_shift);
37787 - if (ret)
37788 - goto out_unlock;
37789 - }
37790 -
37791 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37792 stack_size = vma->vm_end - vma->vm_start;
37793 /*
37794 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37795 int err;
37796
37797 file = do_filp_open(AT_FDCWD, name,
37798 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37799 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37800 MAY_EXEC | MAY_OPEN);
37801 if (IS_ERR(file))
37802 goto out;
37803 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37804 old_fs = get_fs();
37805 set_fs(get_ds());
37806 /* The cast to a user pointer is valid due to the set_fs() */
37807 - result = vfs_read(file, (void __user *)addr, count, &pos);
37808 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
37809 set_fs(old_fs);
37810 return result;
37811 }
37812 @@ -1151,7 +1180,7 @@ int check_unsafe_exec(struct linux_binpr
37813 }
37814 rcu_read_unlock();
37815
37816 - if (p->fs->users > n_fs) {
37817 + if (atomic_read(&p->fs->users) > n_fs) {
37818 bprm->unsafe |= LSM_UNSAFE_SHARE;
37819 } else {
37820 res = -EAGAIN;
37821 @@ -1350,6 +1379,11 @@ int do_execve(char * filename,
37822 char __user *__user *envp,
37823 struct pt_regs * regs)
37824 {
37825 +#ifdef CONFIG_GRKERNSEC
37826 + struct file *old_exec_file;
37827 + struct acl_subject_label *old_acl;
37828 + struct rlimit old_rlim[RLIM_NLIMITS];
37829 +#endif
37830 struct linux_binprm *bprm;
37831 struct file *file;
37832 struct files_struct *displaced;
37833 @@ -1386,6 +1420,23 @@ int do_execve(char * filename,
37834 bprm->filename = filename;
37835 bprm->interp = filename;
37836
37837 + if (gr_process_user_ban()) {
37838 + retval = -EPERM;
37839 + goto out_file;
37840 + }
37841 +
37842 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37843 +
37844 + if (gr_handle_nproc()) {
37845 + retval = -EAGAIN;
37846 + goto out_file;
37847 + }
37848 +
37849 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37850 + retval = -EACCES;
37851 + goto out_file;
37852 + }
37853 +
37854 retval = bprm_mm_init(bprm);
37855 if (retval)
37856 goto out_file;
37857 @@ -1415,10 +1466,41 @@ int do_execve(char * filename,
37858 if (retval < 0)
37859 goto out;
37860
37861 + if (!gr_tpe_allow(file)) {
37862 + retval = -EACCES;
37863 + goto out;
37864 + }
37865 +
37866 + if (gr_check_crash_exec(file)) {
37867 + retval = -EACCES;
37868 + goto out;
37869 + }
37870 +
37871 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37872 +
37873 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37874 +
37875 +#ifdef CONFIG_GRKERNSEC
37876 + old_acl = current->acl;
37877 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37878 + old_exec_file = current->exec_file;
37879 + get_file(file);
37880 + current->exec_file = file;
37881 +#endif
37882 +
37883 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37884 + bprm->unsafe & LSM_UNSAFE_SHARE);
37885 + if (retval < 0)
37886 + goto out_fail;
37887 +
37888 current->flags &= ~PF_KTHREAD;
37889 retval = search_binary_handler(bprm,regs);
37890 if (retval < 0)
37891 - goto out;
37892 + goto out_fail;
37893 +#ifdef CONFIG_GRKERNSEC
37894 + if (old_exec_file)
37895 + fput(old_exec_file);
37896 +#endif
37897
37898 /* execve succeeded */
37899 current->fs->in_exec = 0;
37900 @@ -1429,6 +1511,14 @@ int do_execve(char * filename,
37901 put_files_struct(displaced);
37902 return retval;
37903
37904 +out_fail:
37905 +#ifdef CONFIG_GRKERNSEC
37906 + current->acl = old_acl;
37907 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37908 + fput(current->exec_file);
37909 + current->exec_file = old_exec_file;
37910 +#endif
37911 +
37912 out:
37913 if (bprm->mm) {
37914 acct_arg_size(bprm, 0);
37915 @@ -1594,6 +1684,220 @@ out:
37916 return ispipe;
37917 }
37918
37919 +int pax_check_flags(unsigned long *flags)
37920 +{
37921 + int retval = 0;
37922 +
37923 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
37924 + if (*flags & MF_PAX_SEGMEXEC)
37925 + {
37926 + *flags &= ~MF_PAX_SEGMEXEC;
37927 + retval = -EINVAL;
37928 + }
37929 +#endif
37930 +
37931 + if ((*flags & MF_PAX_PAGEEXEC)
37932 +
37933 +#ifdef CONFIG_PAX_PAGEEXEC
37934 + && (*flags & MF_PAX_SEGMEXEC)
37935 +#endif
37936 +
37937 + )
37938 + {
37939 + *flags &= ~MF_PAX_PAGEEXEC;
37940 + retval = -EINVAL;
37941 + }
37942 +
37943 + if ((*flags & MF_PAX_MPROTECT)
37944 +
37945 +#ifdef CONFIG_PAX_MPROTECT
37946 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37947 +#endif
37948 +
37949 + )
37950 + {
37951 + *flags &= ~MF_PAX_MPROTECT;
37952 + retval = -EINVAL;
37953 + }
37954 +
37955 + if ((*flags & MF_PAX_EMUTRAMP)
37956 +
37957 +#ifdef CONFIG_PAX_EMUTRAMP
37958 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37959 +#endif
37960 +
37961 + )
37962 + {
37963 + *flags &= ~MF_PAX_EMUTRAMP;
37964 + retval = -EINVAL;
37965 + }
37966 +
37967 + return retval;
37968 +}
37969 +
37970 +EXPORT_SYMBOL(pax_check_flags);
37971 +
37972 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37973 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
37974 +{
37975 + struct task_struct *tsk = current;
37976 + struct mm_struct *mm = current->mm;
37977 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
37978 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
37979 + char *path_exec = NULL;
37980 + char *path_fault = NULL;
37981 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
37982 +
37983 + if (buffer_exec && buffer_fault) {
37984 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
37985 +
37986 + down_read(&mm->mmap_sem);
37987 + vma = mm->mmap;
37988 + while (vma && (!vma_exec || !vma_fault)) {
37989 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
37990 + vma_exec = vma;
37991 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
37992 + vma_fault = vma;
37993 + vma = vma->vm_next;
37994 + }
37995 + if (vma_exec) {
37996 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
37997 + if (IS_ERR(path_exec))
37998 + path_exec = "<path too long>";
37999 + else {
38000 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38001 + if (path_exec) {
38002 + *path_exec = 0;
38003 + path_exec = buffer_exec;
38004 + } else
38005 + path_exec = "<path too long>";
38006 + }
38007 + }
38008 + if (vma_fault) {
38009 + start = vma_fault->vm_start;
38010 + end = vma_fault->vm_end;
38011 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38012 + if (vma_fault->vm_file) {
38013 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38014 + if (IS_ERR(path_fault))
38015 + path_fault = "<path too long>";
38016 + else {
38017 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38018 + if (path_fault) {
38019 + *path_fault = 0;
38020 + path_fault = buffer_fault;
38021 + } else
38022 + path_fault = "<path too long>";
38023 + }
38024 + } else
38025 + path_fault = "<anonymous mapping>";
38026 + }
38027 + up_read(&mm->mmap_sem);
38028 + }
38029 + if (tsk->signal->curr_ip)
38030 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38031 + else
38032 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38033 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38034 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38035 + task_uid(tsk), task_euid(tsk), pc, sp);
38036 + free_page((unsigned long)buffer_exec);
38037 + free_page((unsigned long)buffer_fault);
38038 + pax_report_insns(pc, sp);
38039 + do_coredump(SIGKILL, SIGKILL, regs);
38040 +}
38041 +#endif
38042 +
38043 +#ifdef CONFIG_PAX_REFCOUNT
38044 +void pax_report_refcount_overflow(struct pt_regs *regs)
38045 +{
38046 + if (current->signal->curr_ip)
38047 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38048 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38049 + else
38050 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38051 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38052 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38053 + show_regs(regs);
38054 + force_sig_specific(SIGKILL, current);
38055 +}
38056 +#endif
38057 +
38058 +#ifdef CONFIG_PAX_USERCOPY
38059 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38060 +int object_is_on_stack(const void *obj, unsigned long len)
38061 +{
38062 + const void * const stack = task_stack_page(current);
38063 + const void * const stackend = stack + THREAD_SIZE;
38064 +
38065 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38066 + const void *frame = NULL;
38067 + const void *oldframe;
38068 +#endif
38069 +
38070 + if (obj + len < obj)
38071 + return -1;
38072 +
38073 + if (obj + len <= stack || stackend <= obj)
38074 + return 0;
38075 +
38076 + if (obj < stack || stackend < obj + len)
38077 + return -1;
38078 +
38079 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38080 + oldframe = __builtin_frame_address(1);
38081 + if (oldframe)
38082 + frame = __builtin_frame_address(2);
38083 + /*
38084 + low ----------------------------------------------> high
38085 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38086 + ^----------------^
38087 + allow copies only within here
38088 + */
38089 + while (stack <= frame && frame < stackend) {
38090 + /* if obj + len extends past the last frame, this
38091 + check won't pass and the next frame will be 0,
38092 + causing us to bail out and correctly report
38093 + the copy as invalid
38094 + */
38095 + if (obj + len <= frame)
38096 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38097 + oldframe = frame;
38098 + frame = *(const void * const *)frame;
38099 + }
38100 + return -1;
38101 +#else
38102 + return 1;
38103 +#endif
38104 +}
38105 +
38106 +
38107 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38108 +{
38109 + if (current->signal->curr_ip)
38110 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38111 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38112 + else
38113 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38114 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38115 +
38116 + dump_stack();
38117 + gr_handle_kernel_exploit();
38118 + do_group_exit(SIGKILL);
38119 +}
38120 +#endif
38121 +
38122 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38123 +void pax_track_stack(void)
38124 +{
38125 + unsigned long sp = (unsigned long)&sp;
38126 + if (sp < current_thread_info()->lowest_stack &&
38127 + sp > (unsigned long)task_stack_page(current))
38128 + current_thread_info()->lowest_stack = sp;
38129 +}
38130 +EXPORT_SYMBOL(pax_track_stack);
38131 +#endif
38132 +
38133 static int zap_process(struct task_struct *start)
38134 {
38135 struct task_struct *t;
38136 @@ -1796,17 +2100,17 @@ static void wait_for_dump_helpers(struct
38137 pipe = file->f_path.dentry->d_inode->i_pipe;
38138
38139 pipe_lock(pipe);
38140 - pipe->readers++;
38141 - pipe->writers--;
38142 + atomic_inc(&pipe->readers);
38143 + atomic_dec(&pipe->writers);
38144
38145 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38146 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38147 wake_up_interruptible_sync(&pipe->wait);
38148 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38149 pipe_wait(pipe);
38150 }
38151
38152 - pipe->readers--;
38153 - pipe->writers++;
38154 + atomic_dec(&pipe->readers);
38155 + atomic_inc(&pipe->writers);
38156 pipe_unlock(pipe);
38157
38158 }
38159 @@ -1829,10 +2133,13 @@ void do_coredump(long signr, int exit_co
38160 char **helper_argv = NULL;
38161 int helper_argc = 0;
38162 int dump_count = 0;
38163 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38164 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38165
38166 audit_core_dumps(signr);
38167
38168 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38169 + gr_handle_brute_attach(current, mm->flags);
38170 +
38171 binfmt = mm->binfmt;
38172 if (!binfmt || !binfmt->core_dump)
38173 goto fail;
38174 @@ -1877,6 +2184,8 @@ void do_coredump(long signr, int exit_co
38175 */
38176 clear_thread_flag(TIF_SIGPENDING);
38177
38178 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38179 +
38180 /*
38181 * lock_kernel() because format_corename() is controlled by sysctl, which
38182 * uses lock_kernel()
38183 @@ -1911,7 +2220,7 @@ void do_coredump(long signr, int exit_co
38184 goto fail_unlock;
38185 }
38186
38187 - dump_count = atomic_inc_return(&core_dump_count);
38188 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38189 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38190 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38191 task_tgid_vnr(current), current->comm);
38192 @@ -1975,7 +2284,7 @@ close_fail:
38193 filp_close(file, NULL);
38194 fail_dropcount:
38195 if (dump_count)
38196 - atomic_dec(&core_dump_count);
38197 + atomic_dec_unchecked(&core_dump_count);
38198 fail_unlock:
38199 if (helper_argv)
38200 argv_free(helper_argv);
38201 diff -urNp linux-2.6.32.41/fs/ext2/balloc.c linux-2.6.32.41/fs/ext2/balloc.c
38202 --- linux-2.6.32.41/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38203 +++ linux-2.6.32.41/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38204 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38205
38206 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38207 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38208 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38209 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38210 sbi->s_resuid != current_fsuid() &&
38211 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38212 return 0;
38213 diff -urNp linux-2.6.32.41/fs/ext3/balloc.c linux-2.6.32.41/fs/ext3/balloc.c
38214 --- linux-2.6.32.41/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38215 +++ linux-2.6.32.41/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38216 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38217
38218 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38219 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38220 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38221 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38222 sbi->s_resuid != current_fsuid() &&
38223 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38224 return 0;
38225 diff -urNp linux-2.6.32.41/fs/ext4/balloc.c linux-2.6.32.41/fs/ext4/balloc.c
38226 --- linux-2.6.32.41/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38227 +++ linux-2.6.32.41/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38228 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38229 /* Hm, nope. Are (enough) root reserved blocks available? */
38230 if (sbi->s_resuid == current_fsuid() ||
38231 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38232 - capable(CAP_SYS_RESOURCE)) {
38233 + capable_nolog(CAP_SYS_RESOURCE)) {
38234 if (free_blocks >= (nblocks + dirty_blocks))
38235 return 1;
38236 }
38237 diff -urNp linux-2.6.32.41/fs/ext4/ext4.h linux-2.6.32.41/fs/ext4/ext4.h
38238 --- linux-2.6.32.41/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38239 +++ linux-2.6.32.41/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38240 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38241
38242 /* stats for buddy allocator */
38243 spinlock_t s_mb_pa_lock;
38244 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38245 - atomic_t s_bal_success; /* we found long enough chunks */
38246 - atomic_t s_bal_allocated; /* in blocks */
38247 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38248 - atomic_t s_bal_goals; /* goal hits */
38249 - atomic_t s_bal_breaks; /* too long searches */
38250 - atomic_t s_bal_2orders; /* 2^order hits */
38251 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38252 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38253 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38254 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38255 + atomic_unchecked_t s_bal_goals; /* goal hits */
38256 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38257 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38258 spinlock_t s_bal_lock;
38259 unsigned long s_mb_buddies_generated;
38260 unsigned long long s_mb_generation_time;
38261 - atomic_t s_mb_lost_chunks;
38262 - atomic_t s_mb_preallocated;
38263 - atomic_t s_mb_discarded;
38264 + atomic_unchecked_t s_mb_lost_chunks;
38265 + atomic_unchecked_t s_mb_preallocated;
38266 + atomic_unchecked_t s_mb_discarded;
38267 atomic_t s_lock_busy;
38268
38269 /* locality groups */
38270 diff -urNp linux-2.6.32.41/fs/ext4/mballoc.c linux-2.6.32.41/fs/ext4/mballoc.c
38271 --- linux-2.6.32.41/fs/ext4/mballoc.c 2011-03-27 14:31:47.000000000 -0400
38272 +++ linux-2.6.32.41/fs/ext4/mballoc.c 2011-05-16 21:46:57.000000000 -0400
38273 @@ -1753,7 +1753,7 @@ void ext4_mb_simple_scan_group(struct ex
38274 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38275
38276 if (EXT4_SB(sb)->s_mb_stats)
38277 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38278 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38279
38280 break;
38281 }
38282 @@ -2129,7 +2129,7 @@ repeat:
38283 ac->ac_status = AC_STATUS_CONTINUE;
38284 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38285 cr = 3;
38286 - atomic_inc(&sbi->s_mb_lost_chunks);
38287 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38288 goto repeat;
38289 }
38290 }
38291 @@ -2172,6 +2172,8 @@ static int ext4_mb_seq_groups_show(struc
38292 ext4_grpblk_t counters[16];
38293 } sg;
38294
38295 + pax_track_stack();
38296 +
38297 group--;
38298 if (group == 0)
38299 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38300 @@ -2532,25 +2534,25 @@ int ext4_mb_release(struct super_block *
38301 if (sbi->s_mb_stats) {
38302 printk(KERN_INFO
38303 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38304 - atomic_read(&sbi->s_bal_allocated),
38305 - atomic_read(&sbi->s_bal_reqs),
38306 - atomic_read(&sbi->s_bal_success));
38307 + atomic_read_unchecked(&sbi->s_bal_allocated),
38308 + atomic_read_unchecked(&sbi->s_bal_reqs),
38309 + atomic_read_unchecked(&sbi->s_bal_success));
38310 printk(KERN_INFO
38311 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38312 "%u 2^N hits, %u breaks, %u lost\n",
38313 - atomic_read(&sbi->s_bal_ex_scanned),
38314 - atomic_read(&sbi->s_bal_goals),
38315 - atomic_read(&sbi->s_bal_2orders),
38316 - atomic_read(&sbi->s_bal_breaks),
38317 - atomic_read(&sbi->s_mb_lost_chunks));
38318 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38319 + atomic_read_unchecked(&sbi->s_bal_goals),
38320 + atomic_read_unchecked(&sbi->s_bal_2orders),
38321 + atomic_read_unchecked(&sbi->s_bal_breaks),
38322 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38323 printk(KERN_INFO
38324 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38325 sbi->s_mb_buddies_generated++,
38326 sbi->s_mb_generation_time);
38327 printk(KERN_INFO
38328 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38329 - atomic_read(&sbi->s_mb_preallocated),
38330 - atomic_read(&sbi->s_mb_discarded));
38331 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38332 + atomic_read_unchecked(&sbi->s_mb_discarded));
38333 }
38334
38335 free_percpu(sbi->s_locality_groups);
38336 @@ -3032,16 +3034,16 @@ static void ext4_mb_collect_stats(struct
38337 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38338
38339 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38340 - atomic_inc(&sbi->s_bal_reqs);
38341 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38342 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38343 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38344 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38345 - atomic_inc(&sbi->s_bal_success);
38346 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38347 + atomic_inc_unchecked(&sbi->s_bal_success);
38348 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38349 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38350 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38351 - atomic_inc(&sbi->s_bal_goals);
38352 + atomic_inc_unchecked(&sbi->s_bal_goals);
38353 if (ac->ac_found > sbi->s_mb_max_to_scan)
38354 - atomic_inc(&sbi->s_bal_breaks);
38355 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38356 }
38357
38358 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38359 @@ -3441,7 +3443,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38360 trace_ext4_mb_new_inode_pa(ac, pa);
38361
38362 ext4_mb_use_inode_pa(ac, pa);
38363 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38364 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38365
38366 ei = EXT4_I(ac->ac_inode);
38367 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38368 @@ -3501,7 +3503,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38369 trace_ext4_mb_new_group_pa(ac, pa);
38370
38371 ext4_mb_use_group_pa(ac, pa);
38372 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38373 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38374
38375 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38376 lg = ac->ac_lg;
38377 @@ -3605,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38378 * from the bitmap and continue.
38379 */
38380 }
38381 - atomic_add(free, &sbi->s_mb_discarded);
38382 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38383
38384 return err;
38385 }
38386 @@ -3624,7 +3626,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38387 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38388 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38389 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38390 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38391 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38392
38393 if (ac) {
38394 ac->ac_sb = sb;
38395 diff -urNp linux-2.6.32.41/fs/ext4/super.c linux-2.6.32.41/fs/ext4/super.c
38396 --- linux-2.6.32.41/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38397 +++ linux-2.6.32.41/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38398 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38399 }
38400
38401
38402 -static struct sysfs_ops ext4_attr_ops = {
38403 +static const struct sysfs_ops ext4_attr_ops = {
38404 .show = ext4_attr_show,
38405 .store = ext4_attr_store,
38406 };
38407 diff -urNp linux-2.6.32.41/fs/fcntl.c linux-2.6.32.41/fs/fcntl.c
38408 --- linux-2.6.32.41/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38409 +++ linux-2.6.32.41/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38410 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38411 if (err)
38412 return err;
38413
38414 + if (gr_handle_chroot_fowner(pid, type))
38415 + return -ENOENT;
38416 + if (gr_check_protected_task_fowner(pid, type))
38417 + return -EACCES;
38418 +
38419 f_modown(filp, pid, type, force);
38420 return 0;
38421 }
38422 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38423 switch (cmd) {
38424 case F_DUPFD:
38425 case F_DUPFD_CLOEXEC:
38426 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38427 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38428 break;
38429 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38430 diff -urNp linux-2.6.32.41/fs/fifo.c linux-2.6.32.41/fs/fifo.c
38431 --- linux-2.6.32.41/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38432 +++ linux-2.6.32.41/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38433 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38434 */
38435 filp->f_op = &read_pipefifo_fops;
38436 pipe->r_counter++;
38437 - if (pipe->readers++ == 0)
38438 + if (atomic_inc_return(&pipe->readers) == 1)
38439 wake_up_partner(inode);
38440
38441 - if (!pipe->writers) {
38442 + if (!atomic_read(&pipe->writers)) {
38443 if ((filp->f_flags & O_NONBLOCK)) {
38444 /* suppress POLLHUP until we have
38445 * seen a writer */
38446 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38447 * errno=ENXIO when there is no process reading the FIFO.
38448 */
38449 ret = -ENXIO;
38450 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38451 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38452 goto err;
38453
38454 filp->f_op = &write_pipefifo_fops;
38455 pipe->w_counter++;
38456 - if (!pipe->writers++)
38457 + if (atomic_inc_return(&pipe->writers) == 1)
38458 wake_up_partner(inode);
38459
38460 - if (!pipe->readers) {
38461 + if (!atomic_read(&pipe->readers)) {
38462 wait_for_partner(inode, &pipe->r_counter);
38463 if (signal_pending(current))
38464 goto err_wr;
38465 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38466 */
38467 filp->f_op = &rdwr_pipefifo_fops;
38468
38469 - pipe->readers++;
38470 - pipe->writers++;
38471 + atomic_inc(&pipe->readers);
38472 + atomic_inc(&pipe->writers);
38473 pipe->r_counter++;
38474 pipe->w_counter++;
38475 - if (pipe->readers == 1 || pipe->writers == 1)
38476 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38477 wake_up_partner(inode);
38478 break;
38479
38480 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38481 return 0;
38482
38483 err_rd:
38484 - if (!--pipe->readers)
38485 + if (atomic_dec_and_test(&pipe->readers))
38486 wake_up_interruptible(&pipe->wait);
38487 ret = -ERESTARTSYS;
38488 goto err;
38489
38490 err_wr:
38491 - if (!--pipe->writers)
38492 + if (atomic_dec_and_test(&pipe->writers))
38493 wake_up_interruptible(&pipe->wait);
38494 ret = -ERESTARTSYS;
38495 goto err;
38496
38497 err:
38498 - if (!pipe->readers && !pipe->writers)
38499 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38500 free_pipe_info(inode);
38501
38502 err_nocleanup:
38503 diff -urNp linux-2.6.32.41/fs/file.c linux-2.6.32.41/fs/file.c
38504 --- linux-2.6.32.41/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38505 +++ linux-2.6.32.41/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38506 @@ -14,6 +14,7 @@
38507 #include <linux/slab.h>
38508 #include <linux/vmalloc.h>
38509 #include <linux/file.h>
38510 +#include <linux/security.h>
38511 #include <linux/fdtable.h>
38512 #include <linux/bitops.h>
38513 #include <linux/interrupt.h>
38514 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38515 * N.B. For clone tasks sharing a files structure, this test
38516 * will limit the total number of files that can be opened.
38517 */
38518 +
38519 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38520 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38521 return -EMFILE;
38522
38523 diff -urNp linux-2.6.32.41/fs/filesystems.c linux-2.6.32.41/fs/filesystems.c
38524 --- linux-2.6.32.41/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38525 +++ linux-2.6.32.41/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38526 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38527 int len = dot ? dot - name : strlen(name);
38528
38529 fs = __get_fs_type(name, len);
38530 +
38531 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38532 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38533 +#else
38534 if (!fs && (request_module("%.*s", len, name) == 0))
38535 +#endif
38536 fs = __get_fs_type(name, len);
38537
38538 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38539 diff -urNp linux-2.6.32.41/fs/fscache/cookie.c linux-2.6.32.41/fs/fscache/cookie.c
38540 --- linux-2.6.32.41/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38541 +++ linux-2.6.32.41/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38542 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38543 parent ? (char *) parent->def->name : "<no-parent>",
38544 def->name, netfs_data);
38545
38546 - fscache_stat(&fscache_n_acquires);
38547 + fscache_stat_unchecked(&fscache_n_acquires);
38548
38549 /* if there's no parent cookie, then we don't create one here either */
38550 if (!parent) {
38551 - fscache_stat(&fscache_n_acquires_null);
38552 + fscache_stat_unchecked(&fscache_n_acquires_null);
38553 _leave(" [no parent]");
38554 return NULL;
38555 }
38556 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38557 /* allocate and initialise a cookie */
38558 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38559 if (!cookie) {
38560 - fscache_stat(&fscache_n_acquires_oom);
38561 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38562 _leave(" [ENOMEM]");
38563 return NULL;
38564 }
38565 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38566
38567 switch (cookie->def->type) {
38568 case FSCACHE_COOKIE_TYPE_INDEX:
38569 - fscache_stat(&fscache_n_cookie_index);
38570 + fscache_stat_unchecked(&fscache_n_cookie_index);
38571 break;
38572 case FSCACHE_COOKIE_TYPE_DATAFILE:
38573 - fscache_stat(&fscache_n_cookie_data);
38574 + fscache_stat_unchecked(&fscache_n_cookie_data);
38575 break;
38576 default:
38577 - fscache_stat(&fscache_n_cookie_special);
38578 + fscache_stat_unchecked(&fscache_n_cookie_special);
38579 break;
38580 }
38581
38582 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38583 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38584 atomic_dec(&parent->n_children);
38585 __fscache_cookie_put(cookie);
38586 - fscache_stat(&fscache_n_acquires_nobufs);
38587 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38588 _leave(" = NULL");
38589 return NULL;
38590 }
38591 }
38592
38593 - fscache_stat(&fscache_n_acquires_ok);
38594 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38595 _leave(" = %p", cookie);
38596 return cookie;
38597 }
38598 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38599 cache = fscache_select_cache_for_object(cookie->parent);
38600 if (!cache) {
38601 up_read(&fscache_addremove_sem);
38602 - fscache_stat(&fscache_n_acquires_no_cache);
38603 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38604 _leave(" = -ENOMEDIUM [no cache]");
38605 return -ENOMEDIUM;
38606 }
38607 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38608 object = cache->ops->alloc_object(cache, cookie);
38609 fscache_stat_d(&fscache_n_cop_alloc_object);
38610 if (IS_ERR(object)) {
38611 - fscache_stat(&fscache_n_object_no_alloc);
38612 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38613 ret = PTR_ERR(object);
38614 goto error;
38615 }
38616
38617 - fscache_stat(&fscache_n_object_alloc);
38618 + fscache_stat_unchecked(&fscache_n_object_alloc);
38619
38620 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38621
38622 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38623 struct fscache_object *object;
38624 struct hlist_node *_p;
38625
38626 - fscache_stat(&fscache_n_updates);
38627 + fscache_stat_unchecked(&fscache_n_updates);
38628
38629 if (!cookie) {
38630 - fscache_stat(&fscache_n_updates_null);
38631 + fscache_stat_unchecked(&fscache_n_updates_null);
38632 _leave(" [no cookie]");
38633 return;
38634 }
38635 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38636 struct fscache_object *object;
38637 unsigned long event;
38638
38639 - fscache_stat(&fscache_n_relinquishes);
38640 + fscache_stat_unchecked(&fscache_n_relinquishes);
38641 if (retire)
38642 - fscache_stat(&fscache_n_relinquishes_retire);
38643 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38644
38645 if (!cookie) {
38646 - fscache_stat(&fscache_n_relinquishes_null);
38647 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38648 _leave(" [no cookie]");
38649 return;
38650 }
38651 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38652
38653 /* wait for the cookie to finish being instantiated (or to fail) */
38654 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38655 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38656 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38657 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38658 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38659 }
38660 diff -urNp linux-2.6.32.41/fs/fscache/internal.h linux-2.6.32.41/fs/fscache/internal.h
38661 --- linux-2.6.32.41/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38662 +++ linux-2.6.32.41/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38663 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38664 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38665 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38666
38667 -extern atomic_t fscache_n_op_pend;
38668 -extern atomic_t fscache_n_op_run;
38669 -extern atomic_t fscache_n_op_enqueue;
38670 -extern atomic_t fscache_n_op_deferred_release;
38671 -extern atomic_t fscache_n_op_release;
38672 -extern atomic_t fscache_n_op_gc;
38673 -extern atomic_t fscache_n_op_cancelled;
38674 -extern atomic_t fscache_n_op_rejected;
38675 -
38676 -extern atomic_t fscache_n_attr_changed;
38677 -extern atomic_t fscache_n_attr_changed_ok;
38678 -extern atomic_t fscache_n_attr_changed_nobufs;
38679 -extern atomic_t fscache_n_attr_changed_nomem;
38680 -extern atomic_t fscache_n_attr_changed_calls;
38681 -
38682 -extern atomic_t fscache_n_allocs;
38683 -extern atomic_t fscache_n_allocs_ok;
38684 -extern atomic_t fscache_n_allocs_wait;
38685 -extern atomic_t fscache_n_allocs_nobufs;
38686 -extern atomic_t fscache_n_allocs_intr;
38687 -extern atomic_t fscache_n_allocs_object_dead;
38688 -extern atomic_t fscache_n_alloc_ops;
38689 -extern atomic_t fscache_n_alloc_op_waits;
38690 -
38691 -extern atomic_t fscache_n_retrievals;
38692 -extern atomic_t fscache_n_retrievals_ok;
38693 -extern atomic_t fscache_n_retrievals_wait;
38694 -extern atomic_t fscache_n_retrievals_nodata;
38695 -extern atomic_t fscache_n_retrievals_nobufs;
38696 -extern atomic_t fscache_n_retrievals_intr;
38697 -extern atomic_t fscache_n_retrievals_nomem;
38698 -extern atomic_t fscache_n_retrievals_object_dead;
38699 -extern atomic_t fscache_n_retrieval_ops;
38700 -extern atomic_t fscache_n_retrieval_op_waits;
38701 -
38702 -extern atomic_t fscache_n_stores;
38703 -extern atomic_t fscache_n_stores_ok;
38704 -extern atomic_t fscache_n_stores_again;
38705 -extern atomic_t fscache_n_stores_nobufs;
38706 -extern atomic_t fscache_n_stores_oom;
38707 -extern atomic_t fscache_n_store_ops;
38708 -extern atomic_t fscache_n_store_calls;
38709 -extern atomic_t fscache_n_store_pages;
38710 -extern atomic_t fscache_n_store_radix_deletes;
38711 -extern atomic_t fscache_n_store_pages_over_limit;
38712 -
38713 -extern atomic_t fscache_n_store_vmscan_not_storing;
38714 -extern atomic_t fscache_n_store_vmscan_gone;
38715 -extern atomic_t fscache_n_store_vmscan_busy;
38716 -extern atomic_t fscache_n_store_vmscan_cancelled;
38717 -
38718 -extern atomic_t fscache_n_marks;
38719 -extern atomic_t fscache_n_uncaches;
38720 -
38721 -extern atomic_t fscache_n_acquires;
38722 -extern atomic_t fscache_n_acquires_null;
38723 -extern atomic_t fscache_n_acquires_no_cache;
38724 -extern atomic_t fscache_n_acquires_ok;
38725 -extern atomic_t fscache_n_acquires_nobufs;
38726 -extern atomic_t fscache_n_acquires_oom;
38727 -
38728 -extern atomic_t fscache_n_updates;
38729 -extern atomic_t fscache_n_updates_null;
38730 -extern atomic_t fscache_n_updates_run;
38731 -
38732 -extern atomic_t fscache_n_relinquishes;
38733 -extern atomic_t fscache_n_relinquishes_null;
38734 -extern atomic_t fscache_n_relinquishes_waitcrt;
38735 -extern atomic_t fscache_n_relinquishes_retire;
38736 -
38737 -extern atomic_t fscache_n_cookie_index;
38738 -extern atomic_t fscache_n_cookie_data;
38739 -extern atomic_t fscache_n_cookie_special;
38740 -
38741 -extern atomic_t fscache_n_object_alloc;
38742 -extern atomic_t fscache_n_object_no_alloc;
38743 -extern atomic_t fscache_n_object_lookups;
38744 -extern atomic_t fscache_n_object_lookups_negative;
38745 -extern atomic_t fscache_n_object_lookups_positive;
38746 -extern atomic_t fscache_n_object_lookups_timed_out;
38747 -extern atomic_t fscache_n_object_created;
38748 -extern atomic_t fscache_n_object_avail;
38749 -extern atomic_t fscache_n_object_dead;
38750 -
38751 -extern atomic_t fscache_n_checkaux_none;
38752 -extern atomic_t fscache_n_checkaux_okay;
38753 -extern atomic_t fscache_n_checkaux_update;
38754 -extern atomic_t fscache_n_checkaux_obsolete;
38755 +extern atomic_unchecked_t fscache_n_op_pend;
38756 +extern atomic_unchecked_t fscache_n_op_run;
38757 +extern atomic_unchecked_t fscache_n_op_enqueue;
38758 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38759 +extern atomic_unchecked_t fscache_n_op_release;
38760 +extern atomic_unchecked_t fscache_n_op_gc;
38761 +extern atomic_unchecked_t fscache_n_op_cancelled;
38762 +extern atomic_unchecked_t fscache_n_op_rejected;
38763 +
38764 +extern atomic_unchecked_t fscache_n_attr_changed;
38765 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
38766 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38767 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38768 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
38769 +
38770 +extern atomic_unchecked_t fscache_n_allocs;
38771 +extern atomic_unchecked_t fscache_n_allocs_ok;
38772 +extern atomic_unchecked_t fscache_n_allocs_wait;
38773 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
38774 +extern atomic_unchecked_t fscache_n_allocs_intr;
38775 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
38776 +extern atomic_unchecked_t fscache_n_alloc_ops;
38777 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
38778 +
38779 +extern atomic_unchecked_t fscache_n_retrievals;
38780 +extern atomic_unchecked_t fscache_n_retrievals_ok;
38781 +extern atomic_unchecked_t fscache_n_retrievals_wait;
38782 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
38783 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38784 +extern atomic_unchecked_t fscache_n_retrievals_intr;
38785 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
38786 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38787 +extern atomic_unchecked_t fscache_n_retrieval_ops;
38788 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38789 +
38790 +extern atomic_unchecked_t fscache_n_stores;
38791 +extern atomic_unchecked_t fscache_n_stores_ok;
38792 +extern atomic_unchecked_t fscache_n_stores_again;
38793 +extern atomic_unchecked_t fscache_n_stores_nobufs;
38794 +extern atomic_unchecked_t fscache_n_stores_oom;
38795 +extern atomic_unchecked_t fscache_n_store_ops;
38796 +extern atomic_unchecked_t fscache_n_store_calls;
38797 +extern atomic_unchecked_t fscache_n_store_pages;
38798 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
38799 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38800 +
38801 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38802 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38803 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38804 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38805 +
38806 +extern atomic_unchecked_t fscache_n_marks;
38807 +extern atomic_unchecked_t fscache_n_uncaches;
38808 +
38809 +extern atomic_unchecked_t fscache_n_acquires;
38810 +extern atomic_unchecked_t fscache_n_acquires_null;
38811 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
38812 +extern atomic_unchecked_t fscache_n_acquires_ok;
38813 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
38814 +extern atomic_unchecked_t fscache_n_acquires_oom;
38815 +
38816 +extern atomic_unchecked_t fscache_n_updates;
38817 +extern atomic_unchecked_t fscache_n_updates_null;
38818 +extern atomic_unchecked_t fscache_n_updates_run;
38819 +
38820 +extern atomic_unchecked_t fscache_n_relinquishes;
38821 +extern atomic_unchecked_t fscache_n_relinquishes_null;
38822 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38823 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
38824 +
38825 +extern atomic_unchecked_t fscache_n_cookie_index;
38826 +extern atomic_unchecked_t fscache_n_cookie_data;
38827 +extern atomic_unchecked_t fscache_n_cookie_special;
38828 +
38829 +extern atomic_unchecked_t fscache_n_object_alloc;
38830 +extern atomic_unchecked_t fscache_n_object_no_alloc;
38831 +extern atomic_unchecked_t fscache_n_object_lookups;
38832 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
38833 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
38834 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38835 +extern atomic_unchecked_t fscache_n_object_created;
38836 +extern atomic_unchecked_t fscache_n_object_avail;
38837 +extern atomic_unchecked_t fscache_n_object_dead;
38838 +
38839 +extern atomic_unchecked_t fscache_n_checkaux_none;
38840 +extern atomic_unchecked_t fscache_n_checkaux_okay;
38841 +extern atomic_unchecked_t fscache_n_checkaux_update;
38842 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38843
38844 extern atomic_t fscache_n_cop_alloc_object;
38845 extern atomic_t fscache_n_cop_lookup_object;
38846 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38847 atomic_inc(stat);
38848 }
38849
38850 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38851 +{
38852 + atomic_inc_unchecked(stat);
38853 +}
38854 +
38855 static inline void fscache_stat_d(atomic_t *stat)
38856 {
38857 atomic_dec(stat);
38858 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
38859
38860 #define __fscache_stat(stat) (NULL)
38861 #define fscache_stat(stat) do {} while (0)
38862 +#define fscache_stat_unchecked(stat) do {} while (0)
38863 #define fscache_stat_d(stat) do {} while (0)
38864 #endif
38865
38866 diff -urNp linux-2.6.32.41/fs/fscache/object.c linux-2.6.32.41/fs/fscache/object.c
38867 --- linux-2.6.32.41/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38868 +++ linux-2.6.32.41/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38869 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
38870 /* update the object metadata on disk */
38871 case FSCACHE_OBJECT_UPDATING:
38872 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38873 - fscache_stat(&fscache_n_updates_run);
38874 + fscache_stat_unchecked(&fscache_n_updates_run);
38875 fscache_stat(&fscache_n_cop_update_object);
38876 object->cache->ops->update_object(object);
38877 fscache_stat_d(&fscache_n_cop_update_object);
38878 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
38879 spin_lock(&object->lock);
38880 object->state = FSCACHE_OBJECT_DEAD;
38881 spin_unlock(&object->lock);
38882 - fscache_stat(&fscache_n_object_dead);
38883 + fscache_stat_unchecked(&fscache_n_object_dead);
38884 goto terminal_transit;
38885
38886 /* handle the parent cache of this object being withdrawn from
38887 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
38888 spin_lock(&object->lock);
38889 object->state = FSCACHE_OBJECT_DEAD;
38890 spin_unlock(&object->lock);
38891 - fscache_stat(&fscache_n_object_dead);
38892 + fscache_stat_unchecked(&fscache_n_object_dead);
38893 goto terminal_transit;
38894
38895 /* complain about the object being woken up once it is
38896 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
38897 parent->cookie->def->name, cookie->def->name,
38898 object->cache->tag->name);
38899
38900 - fscache_stat(&fscache_n_object_lookups);
38901 + fscache_stat_unchecked(&fscache_n_object_lookups);
38902 fscache_stat(&fscache_n_cop_lookup_object);
38903 ret = object->cache->ops->lookup_object(object);
38904 fscache_stat_d(&fscache_n_cop_lookup_object);
38905 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
38906 if (ret == -ETIMEDOUT) {
38907 /* probably stuck behind another object, so move this one to
38908 * the back of the queue */
38909 - fscache_stat(&fscache_n_object_lookups_timed_out);
38910 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
38911 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38912 }
38913
38914 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
38915
38916 spin_lock(&object->lock);
38917 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38918 - fscache_stat(&fscache_n_object_lookups_negative);
38919 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
38920
38921 /* transit here to allow write requests to begin stacking up
38922 * and read requests to begin returning ENODATA */
38923 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
38924 * result, in which case there may be data available */
38925 spin_lock(&object->lock);
38926 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38927 - fscache_stat(&fscache_n_object_lookups_positive);
38928 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
38929
38930 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
38931
38932 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
38933 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38934 } else {
38935 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
38936 - fscache_stat(&fscache_n_object_created);
38937 + fscache_stat_unchecked(&fscache_n_object_created);
38938
38939 object->state = FSCACHE_OBJECT_AVAILABLE;
38940 spin_unlock(&object->lock);
38941 @@ -633,7 +633,7 @@ static void fscache_object_available(str
38942 fscache_enqueue_dependents(object);
38943
38944 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
38945 - fscache_stat(&fscache_n_object_avail);
38946 + fscache_stat_unchecked(&fscache_n_object_avail);
38947
38948 _leave("");
38949 }
38950 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
38951 enum fscache_checkaux result;
38952
38953 if (!object->cookie->def->check_aux) {
38954 - fscache_stat(&fscache_n_checkaux_none);
38955 + fscache_stat_unchecked(&fscache_n_checkaux_none);
38956 return FSCACHE_CHECKAUX_OKAY;
38957 }
38958
38959 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
38960 switch (result) {
38961 /* entry okay as is */
38962 case FSCACHE_CHECKAUX_OKAY:
38963 - fscache_stat(&fscache_n_checkaux_okay);
38964 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
38965 break;
38966
38967 /* entry requires update */
38968 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
38969 - fscache_stat(&fscache_n_checkaux_update);
38970 + fscache_stat_unchecked(&fscache_n_checkaux_update);
38971 break;
38972
38973 /* entry requires deletion */
38974 case FSCACHE_CHECKAUX_OBSOLETE:
38975 - fscache_stat(&fscache_n_checkaux_obsolete);
38976 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
38977 break;
38978
38979 default:
38980 diff -urNp linux-2.6.32.41/fs/fscache/operation.c linux-2.6.32.41/fs/fscache/operation.c
38981 --- linux-2.6.32.41/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
38982 +++ linux-2.6.32.41/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
38983 @@ -16,7 +16,7 @@
38984 #include <linux/seq_file.h>
38985 #include "internal.h"
38986
38987 -atomic_t fscache_op_debug_id;
38988 +atomic_unchecked_t fscache_op_debug_id;
38989 EXPORT_SYMBOL(fscache_op_debug_id);
38990
38991 /**
38992 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
38993 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
38994 ASSERTCMP(atomic_read(&op->usage), >, 0);
38995
38996 - fscache_stat(&fscache_n_op_enqueue);
38997 + fscache_stat_unchecked(&fscache_n_op_enqueue);
38998 switch (op->flags & FSCACHE_OP_TYPE) {
38999 case FSCACHE_OP_FAST:
39000 _debug("queue fast");
39001 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39002 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39003 if (op->processor)
39004 fscache_enqueue_operation(op);
39005 - fscache_stat(&fscache_n_op_run);
39006 + fscache_stat_unchecked(&fscache_n_op_run);
39007 }
39008
39009 /*
39010 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39011 if (object->n_ops > 0) {
39012 atomic_inc(&op->usage);
39013 list_add_tail(&op->pend_link, &object->pending_ops);
39014 - fscache_stat(&fscache_n_op_pend);
39015 + fscache_stat_unchecked(&fscache_n_op_pend);
39016 } else if (!list_empty(&object->pending_ops)) {
39017 atomic_inc(&op->usage);
39018 list_add_tail(&op->pend_link, &object->pending_ops);
39019 - fscache_stat(&fscache_n_op_pend);
39020 + fscache_stat_unchecked(&fscache_n_op_pend);
39021 fscache_start_operations(object);
39022 } else {
39023 ASSERTCMP(object->n_in_progress, ==, 0);
39024 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39025 object->n_exclusive++; /* reads and writes must wait */
39026 atomic_inc(&op->usage);
39027 list_add_tail(&op->pend_link, &object->pending_ops);
39028 - fscache_stat(&fscache_n_op_pend);
39029 + fscache_stat_unchecked(&fscache_n_op_pend);
39030 ret = 0;
39031 } else {
39032 /* not allowed to submit ops in any other state */
39033 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39034 if (object->n_exclusive > 0) {
39035 atomic_inc(&op->usage);
39036 list_add_tail(&op->pend_link, &object->pending_ops);
39037 - fscache_stat(&fscache_n_op_pend);
39038 + fscache_stat_unchecked(&fscache_n_op_pend);
39039 } else if (!list_empty(&object->pending_ops)) {
39040 atomic_inc(&op->usage);
39041 list_add_tail(&op->pend_link, &object->pending_ops);
39042 - fscache_stat(&fscache_n_op_pend);
39043 + fscache_stat_unchecked(&fscache_n_op_pend);
39044 fscache_start_operations(object);
39045 } else {
39046 ASSERTCMP(object->n_exclusive, ==, 0);
39047 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39048 object->n_ops++;
39049 atomic_inc(&op->usage);
39050 list_add_tail(&op->pend_link, &object->pending_ops);
39051 - fscache_stat(&fscache_n_op_pend);
39052 + fscache_stat_unchecked(&fscache_n_op_pend);
39053 ret = 0;
39054 } else if (object->state == FSCACHE_OBJECT_DYING ||
39055 object->state == FSCACHE_OBJECT_LC_DYING ||
39056 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39057 - fscache_stat(&fscache_n_op_rejected);
39058 + fscache_stat_unchecked(&fscache_n_op_rejected);
39059 ret = -ENOBUFS;
39060 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39061 fscache_report_unexpected_submission(object, op, ostate);
39062 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39063
39064 ret = -EBUSY;
39065 if (!list_empty(&op->pend_link)) {
39066 - fscache_stat(&fscache_n_op_cancelled);
39067 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39068 list_del_init(&op->pend_link);
39069 object->n_ops--;
39070 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39071 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39072 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39073 BUG();
39074
39075 - fscache_stat(&fscache_n_op_release);
39076 + fscache_stat_unchecked(&fscache_n_op_release);
39077
39078 if (op->release) {
39079 op->release(op);
39080 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39081 * lock, and defer it otherwise */
39082 if (!spin_trylock(&object->lock)) {
39083 _debug("defer put");
39084 - fscache_stat(&fscache_n_op_deferred_release);
39085 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39086
39087 cache = object->cache;
39088 spin_lock(&cache->op_gc_list_lock);
39089 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39090
39091 _debug("GC DEFERRED REL OBJ%x OP%x",
39092 object->debug_id, op->debug_id);
39093 - fscache_stat(&fscache_n_op_gc);
39094 + fscache_stat_unchecked(&fscache_n_op_gc);
39095
39096 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39097
39098 diff -urNp linux-2.6.32.41/fs/fscache/page.c linux-2.6.32.41/fs/fscache/page.c
39099 --- linux-2.6.32.41/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39100 +++ linux-2.6.32.41/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39101 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39102 val = radix_tree_lookup(&cookie->stores, page->index);
39103 if (!val) {
39104 rcu_read_unlock();
39105 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39106 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39107 __fscache_uncache_page(cookie, page);
39108 return true;
39109 }
39110 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39111 spin_unlock(&cookie->stores_lock);
39112
39113 if (xpage) {
39114 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39115 - fscache_stat(&fscache_n_store_radix_deletes);
39116 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39117 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39118 ASSERTCMP(xpage, ==, page);
39119 } else {
39120 - fscache_stat(&fscache_n_store_vmscan_gone);
39121 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39122 }
39123
39124 wake_up_bit(&cookie->flags, 0);
39125 @@ -106,7 +106,7 @@ page_busy:
39126 /* we might want to wait here, but that could deadlock the allocator as
39127 * the slow-work threads writing to the cache may all end up sleeping
39128 * on memory allocation */
39129 - fscache_stat(&fscache_n_store_vmscan_busy);
39130 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39131 return false;
39132 }
39133 EXPORT_SYMBOL(__fscache_maybe_release_page);
39134 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39135 FSCACHE_COOKIE_STORING_TAG);
39136 if (!radix_tree_tag_get(&cookie->stores, page->index,
39137 FSCACHE_COOKIE_PENDING_TAG)) {
39138 - fscache_stat(&fscache_n_store_radix_deletes);
39139 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39140 xpage = radix_tree_delete(&cookie->stores, page->index);
39141 }
39142 spin_unlock(&cookie->stores_lock);
39143 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39144
39145 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39146
39147 - fscache_stat(&fscache_n_attr_changed_calls);
39148 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39149
39150 if (fscache_object_is_active(object)) {
39151 fscache_set_op_state(op, "CallFS");
39152 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39153
39154 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39155
39156 - fscache_stat(&fscache_n_attr_changed);
39157 + fscache_stat_unchecked(&fscache_n_attr_changed);
39158
39159 op = kzalloc(sizeof(*op), GFP_KERNEL);
39160 if (!op) {
39161 - fscache_stat(&fscache_n_attr_changed_nomem);
39162 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39163 _leave(" = -ENOMEM");
39164 return -ENOMEM;
39165 }
39166 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39167 if (fscache_submit_exclusive_op(object, op) < 0)
39168 goto nobufs;
39169 spin_unlock(&cookie->lock);
39170 - fscache_stat(&fscache_n_attr_changed_ok);
39171 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39172 fscache_put_operation(op);
39173 _leave(" = 0");
39174 return 0;
39175 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39176 nobufs:
39177 spin_unlock(&cookie->lock);
39178 kfree(op);
39179 - fscache_stat(&fscache_n_attr_changed_nobufs);
39180 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39181 _leave(" = %d", -ENOBUFS);
39182 return -ENOBUFS;
39183 }
39184 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39185 /* allocate a retrieval operation and attempt to submit it */
39186 op = kzalloc(sizeof(*op), GFP_NOIO);
39187 if (!op) {
39188 - fscache_stat(&fscache_n_retrievals_nomem);
39189 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39190 return NULL;
39191 }
39192
39193 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39194 return 0;
39195 }
39196
39197 - fscache_stat(&fscache_n_retrievals_wait);
39198 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39199
39200 jif = jiffies;
39201 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39202 fscache_wait_bit_interruptible,
39203 TASK_INTERRUPTIBLE) != 0) {
39204 - fscache_stat(&fscache_n_retrievals_intr);
39205 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39206 _leave(" = -ERESTARTSYS");
39207 return -ERESTARTSYS;
39208 }
39209 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39210 */
39211 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39212 struct fscache_retrieval *op,
39213 - atomic_t *stat_op_waits,
39214 - atomic_t *stat_object_dead)
39215 + atomic_unchecked_t *stat_op_waits,
39216 + atomic_unchecked_t *stat_object_dead)
39217 {
39218 int ret;
39219
39220 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39221 goto check_if_dead;
39222
39223 _debug(">>> WT");
39224 - fscache_stat(stat_op_waits);
39225 + fscache_stat_unchecked(stat_op_waits);
39226 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39227 fscache_wait_bit_interruptible,
39228 TASK_INTERRUPTIBLE) < 0) {
39229 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39230
39231 check_if_dead:
39232 if (unlikely(fscache_object_is_dead(object))) {
39233 - fscache_stat(stat_object_dead);
39234 + fscache_stat_unchecked(stat_object_dead);
39235 return -ENOBUFS;
39236 }
39237 return 0;
39238 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39239
39240 _enter("%p,%p,,,", cookie, page);
39241
39242 - fscache_stat(&fscache_n_retrievals);
39243 + fscache_stat_unchecked(&fscache_n_retrievals);
39244
39245 if (hlist_empty(&cookie->backing_objects))
39246 goto nobufs;
39247 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39248 goto nobufs_unlock;
39249 spin_unlock(&cookie->lock);
39250
39251 - fscache_stat(&fscache_n_retrieval_ops);
39252 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39253
39254 /* pin the netfs read context in case we need to do the actual netfs
39255 * read because we've encountered a cache read failure */
39256 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39257
39258 error:
39259 if (ret == -ENOMEM)
39260 - fscache_stat(&fscache_n_retrievals_nomem);
39261 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39262 else if (ret == -ERESTARTSYS)
39263 - fscache_stat(&fscache_n_retrievals_intr);
39264 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39265 else if (ret == -ENODATA)
39266 - fscache_stat(&fscache_n_retrievals_nodata);
39267 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39268 else if (ret < 0)
39269 - fscache_stat(&fscache_n_retrievals_nobufs);
39270 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39271 else
39272 - fscache_stat(&fscache_n_retrievals_ok);
39273 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39274
39275 fscache_put_retrieval(op);
39276 _leave(" = %d", ret);
39277 @@ -453,7 +453,7 @@ nobufs_unlock:
39278 spin_unlock(&cookie->lock);
39279 kfree(op);
39280 nobufs:
39281 - fscache_stat(&fscache_n_retrievals_nobufs);
39282 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39283 _leave(" = -ENOBUFS");
39284 return -ENOBUFS;
39285 }
39286 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39287
39288 _enter("%p,,%d,,,", cookie, *nr_pages);
39289
39290 - fscache_stat(&fscache_n_retrievals);
39291 + fscache_stat_unchecked(&fscache_n_retrievals);
39292
39293 if (hlist_empty(&cookie->backing_objects))
39294 goto nobufs;
39295 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39296 goto nobufs_unlock;
39297 spin_unlock(&cookie->lock);
39298
39299 - fscache_stat(&fscache_n_retrieval_ops);
39300 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39301
39302 /* pin the netfs read context in case we need to do the actual netfs
39303 * read because we've encountered a cache read failure */
39304 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39305
39306 error:
39307 if (ret == -ENOMEM)
39308 - fscache_stat(&fscache_n_retrievals_nomem);
39309 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39310 else if (ret == -ERESTARTSYS)
39311 - fscache_stat(&fscache_n_retrievals_intr);
39312 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39313 else if (ret == -ENODATA)
39314 - fscache_stat(&fscache_n_retrievals_nodata);
39315 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39316 else if (ret < 0)
39317 - fscache_stat(&fscache_n_retrievals_nobufs);
39318 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39319 else
39320 - fscache_stat(&fscache_n_retrievals_ok);
39321 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39322
39323 fscache_put_retrieval(op);
39324 _leave(" = %d", ret);
39325 @@ -570,7 +570,7 @@ nobufs_unlock:
39326 spin_unlock(&cookie->lock);
39327 kfree(op);
39328 nobufs:
39329 - fscache_stat(&fscache_n_retrievals_nobufs);
39330 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39331 _leave(" = -ENOBUFS");
39332 return -ENOBUFS;
39333 }
39334 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39335
39336 _enter("%p,%p,,,", cookie, page);
39337
39338 - fscache_stat(&fscache_n_allocs);
39339 + fscache_stat_unchecked(&fscache_n_allocs);
39340
39341 if (hlist_empty(&cookie->backing_objects))
39342 goto nobufs;
39343 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39344 goto nobufs_unlock;
39345 spin_unlock(&cookie->lock);
39346
39347 - fscache_stat(&fscache_n_alloc_ops);
39348 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39349
39350 ret = fscache_wait_for_retrieval_activation(
39351 object, op,
39352 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39353
39354 error:
39355 if (ret == -ERESTARTSYS)
39356 - fscache_stat(&fscache_n_allocs_intr);
39357 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39358 else if (ret < 0)
39359 - fscache_stat(&fscache_n_allocs_nobufs);
39360 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39361 else
39362 - fscache_stat(&fscache_n_allocs_ok);
39363 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39364
39365 fscache_put_retrieval(op);
39366 _leave(" = %d", ret);
39367 @@ -651,7 +651,7 @@ nobufs_unlock:
39368 spin_unlock(&cookie->lock);
39369 kfree(op);
39370 nobufs:
39371 - fscache_stat(&fscache_n_allocs_nobufs);
39372 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39373 _leave(" = -ENOBUFS");
39374 return -ENOBUFS;
39375 }
39376 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39377
39378 spin_lock(&cookie->stores_lock);
39379
39380 - fscache_stat(&fscache_n_store_calls);
39381 + fscache_stat_unchecked(&fscache_n_store_calls);
39382
39383 /* find a page to store */
39384 page = NULL;
39385 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39386 page = results[0];
39387 _debug("gang %d [%lx]", n, page->index);
39388 if (page->index > op->store_limit) {
39389 - fscache_stat(&fscache_n_store_pages_over_limit);
39390 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39391 goto superseded;
39392 }
39393
39394 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39395
39396 if (page) {
39397 fscache_set_op_state(&op->op, "Store");
39398 - fscache_stat(&fscache_n_store_pages);
39399 + fscache_stat_unchecked(&fscache_n_store_pages);
39400 fscache_stat(&fscache_n_cop_write_page);
39401 ret = object->cache->ops->write_page(op, page);
39402 fscache_stat_d(&fscache_n_cop_write_page);
39403 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39404 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39405 ASSERT(PageFsCache(page));
39406
39407 - fscache_stat(&fscache_n_stores);
39408 + fscache_stat_unchecked(&fscache_n_stores);
39409
39410 op = kzalloc(sizeof(*op), GFP_NOIO);
39411 if (!op)
39412 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39413 spin_unlock(&cookie->stores_lock);
39414 spin_unlock(&object->lock);
39415
39416 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39417 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39418 op->store_limit = object->store_limit;
39419
39420 if (fscache_submit_op(object, &op->op) < 0)
39421 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39422
39423 spin_unlock(&cookie->lock);
39424 radix_tree_preload_end();
39425 - fscache_stat(&fscache_n_store_ops);
39426 - fscache_stat(&fscache_n_stores_ok);
39427 + fscache_stat_unchecked(&fscache_n_store_ops);
39428 + fscache_stat_unchecked(&fscache_n_stores_ok);
39429
39430 /* the slow work queue now carries its own ref on the object */
39431 fscache_put_operation(&op->op);
39432 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39433 return 0;
39434
39435 already_queued:
39436 - fscache_stat(&fscache_n_stores_again);
39437 + fscache_stat_unchecked(&fscache_n_stores_again);
39438 already_pending:
39439 spin_unlock(&cookie->stores_lock);
39440 spin_unlock(&object->lock);
39441 spin_unlock(&cookie->lock);
39442 radix_tree_preload_end();
39443 kfree(op);
39444 - fscache_stat(&fscache_n_stores_ok);
39445 + fscache_stat_unchecked(&fscache_n_stores_ok);
39446 _leave(" = 0");
39447 return 0;
39448
39449 @@ -886,14 +886,14 @@ nobufs:
39450 spin_unlock(&cookie->lock);
39451 radix_tree_preload_end();
39452 kfree(op);
39453 - fscache_stat(&fscache_n_stores_nobufs);
39454 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39455 _leave(" = -ENOBUFS");
39456 return -ENOBUFS;
39457
39458 nomem_free:
39459 kfree(op);
39460 nomem:
39461 - fscache_stat(&fscache_n_stores_oom);
39462 + fscache_stat_unchecked(&fscache_n_stores_oom);
39463 _leave(" = -ENOMEM");
39464 return -ENOMEM;
39465 }
39466 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39467 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39468 ASSERTCMP(page, !=, NULL);
39469
39470 - fscache_stat(&fscache_n_uncaches);
39471 + fscache_stat_unchecked(&fscache_n_uncaches);
39472
39473 /* cache withdrawal may beat us to it */
39474 if (!PageFsCache(page))
39475 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39476 unsigned long loop;
39477
39478 #ifdef CONFIG_FSCACHE_STATS
39479 - atomic_add(pagevec->nr, &fscache_n_marks);
39480 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39481 #endif
39482
39483 for (loop = 0; loop < pagevec->nr; loop++) {
39484 diff -urNp linux-2.6.32.41/fs/fscache/stats.c linux-2.6.32.41/fs/fscache/stats.c
39485 --- linux-2.6.32.41/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39486 +++ linux-2.6.32.41/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39487 @@ -18,95 +18,95 @@
39488 /*
39489 * operation counters
39490 */
39491 -atomic_t fscache_n_op_pend;
39492 -atomic_t fscache_n_op_run;
39493 -atomic_t fscache_n_op_enqueue;
39494 -atomic_t fscache_n_op_requeue;
39495 -atomic_t fscache_n_op_deferred_release;
39496 -atomic_t fscache_n_op_release;
39497 -atomic_t fscache_n_op_gc;
39498 -atomic_t fscache_n_op_cancelled;
39499 -atomic_t fscache_n_op_rejected;
39500 -
39501 -atomic_t fscache_n_attr_changed;
39502 -atomic_t fscache_n_attr_changed_ok;
39503 -atomic_t fscache_n_attr_changed_nobufs;
39504 -atomic_t fscache_n_attr_changed_nomem;
39505 -atomic_t fscache_n_attr_changed_calls;
39506 -
39507 -atomic_t fscache_n_allocs;
39508 -atomic_t fscache_n_allocs_ok;
39509 -atomic_t fscache_n_allocs_wait;
39510 -atomic_t fscache_n_allocs_nobufs;
39511 -atomic_t fscache_n_allocs_intr;
39512 -atomic_t fscache_n_allocs_object_dead;
39513 -atomic_t fscache_n_alloc_ops;
39514 -atomic_t fscache_n_alloc_op_waits;
39515 -
39516 -atomic_t fscache_n_retrievals;
39517 -atomic_t fscache_n_retrievals_ok;
39518 -atomic_t fscache_n_retrievals_wait;
39519 -atomic_t fscache_n_retrievals_nodata;
39520 -atomic_t fscache_n_retrievals_nobufs;
39521 -atomic_t fscache_n_retrievals_intr;
39522 -atomic_t fscache_n_retrievals_nomem;
39523 -atomic_t fscache_n_retrievals_object_dead;
39524 -atomic_t fscache_n_retrieval_ops;
39525 -atomic_t fscache_n_retrieval_op_waits;
39526 -
39527 -atomic_t fscache_n_stores;
39528 -atomic_t fscache_n_stores_ok;
39529 -atomic_t fscache_n_stores_again;
39530 -atomic_t fscache_n_stores_nobufs;
39531 -atomic_t fscache_n_stores_oom;
39532 -atomic_t fscache_n_store_ops;
39533 -atomic_t fscache_n_store_calls;
39534 -atomic_t fscache_n_store_pages;
39535 -atomic_t fscache_n_store_radix_deletes;
39536 -atomic_t fscache_n_store_pages_over_limit;
39537 -
39538 -atomic_t fscache_n_store_vmscan_not_storing;
39539 -atomic_t fscache_n_store_vmscan_gone;
39540 -atomic_t fscache_n_store_vmscan_busy;
39541 -atomic_t fscache_n_store_vmscan_cancelled;
39542 -
39543 -atomic_t fscache_n_marks;
39544 -atomic_t fscache_n_uncaches;
39545 -
39546 -atomic_t fscache_n_acquires;
39547 -atomic_t fscache_n_acquires_null;
39548 -atomic_t fscache_n_acquires_no_cache;
39549 -atomic_t fscache_n_acquires_ok;
39550 -atomic_t fscache_n_acquires_nobufs;
39551 -atomic_t fscache_n_acquires_oom;
39552 -
39553 -atomic_t fscache_n_updates;
39554 -atomic_t fscache_n_updates_null;
39555 -atomic_t fscache_n_updates_run;
39556 -
39557 -atomic_t fscache_n_relinquishes;
39558 -atomic_t fscache_n_relinquishes_null;
39559 -atomic_t fscache_n_relinquishes_waitcrt;
39560 -atomic_t fscache_n_relinquishes_retire;
39561 -
39562 -atomic_t fscache_n_cookie_index;
39563 -atomic_t fscache_n_cookie_data;
39564 -atomic_t fscache_n_cookie_special;
39565 -
39566 -atomic_t fscache_n_object_alloc;
39567 -atomic_t fscache_n_object_no_alloc;
39568 -atomic_t fscache_n_object_lookups;
39569 -atomic_t fscache_n_object_lookups_negative;
39570 -atomic_t fscache_n_object_lookups_positive;
39571 -atomic_t fscache_n_object_lookups_timed_out;
39572 -atomic_t fscache_n_object_created;
39573 -atomic_t fscache_n_object_avail;
39574 -atomic_t fscache_n_object_dead;
39575 -
39576 -atomic_t fscache_n_checkaux_none;
39577 -atomic_t fscache_n_checkaux_okay;
39578 -atomic_t fscache_n_checkaux_update;
39579 -atomic_t fscache_n_checkaux_obsolete;
39580 +atomic_unchecked_t fscache_n_op_pend;
39581 +atomic_unchecked_t fscache_n_op_run;
39582 +atomic_unchecked_t fscache_n_op_enqueue;
39583 +atomic_unchecked_t fscache_n_op_requeue;
39584 +atomic_unchecked_t fscache_n_op_deferred_release;
39585 +atomic_unchecked_t fscache_n_op_release;
39586 +atomic_unchecked_t fscache_n_op_gc;
39587 +atomic_unchecked_t fscache_n_op_cancelled;
39588 +atomic_unchecked_t fscache_n_op_rejected;
39589 +
39590 +atomic_unchecked_t fscache_n_attr_changed;
39591 +atomic_unchecked_t fscache_n_attr_changed_ok;
39592 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39593 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39594 +atomic_unchecked_t fscache_n_attr_changed_calls;
39595 +
39596 +atomic_unchecked_t fscache_n_allocs;
39597 +atomic_unchecked_t fscache_n_allocs_ok;
39598 +atomic_unchecked_t fscache_n_allocs_wait;
39599 +atomic_unchecked_t fscache_n_allocs_nobufs;
39600 +atomic_unchecked_t fscache_n_allocs_intr;
39601 +atomic_unchecked_t fscache_n_allocs_object_dead;
39602 +atomic_unchecked_t fscache_n_alloc_ops;
39603 +atomic_unchecked_t fscache_n_alloc_op_waits;
39604 +
39605 +atomic_unchecked_t fscache_n_retrievals;
39606 +atomic_unchecked_t fscache_n_retrievals_ok;
39607 +atomic_unchecked_t fscache_n_retrievals_wait;
39608 +atomic_unchecked_t fscache_n_retrievals_nodata;
39609 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39610 +atomic_unchecked_t fscache_n_retrievals_intr;
39611 +atomic_unchecked_t fscache_n_retrievals_nomem;
39612 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39613 +atomic_unchecked_t fscache_n_retrieval_ops;
39614 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39615 +
39616 +atomic_unchecked_t fscache_n_stores;
39617 +atomic_unchecked_t fscache_n_stores_ok;
39618 +atomic_unchecked_t fscache_n_stores_again;
39619 +atomic_unchecked_t fscache_n_stores_nobufs;
39620 +atomic_unchecked_t fscache_n_stores_oom;
39621 +atomic_unchecked_t fscache_n_store_ops;
39622 +atomic_unchecked_t fscache_n_store_calls;
39623 +atomic_unchecked_t fscache_n_store_pages;
39624 +atomic_unchecked_t fscache_n_store_radix_deletes;
39625 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39626 +
39627 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39628 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39629 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39630 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39631 +
39632 +atomic_unchecked_t fscache_n_marks;
39633 +atomic_unchecked_t fscache_n_uncaches;
39634 +
39635 +atomic_unchecked_t fscache_n_acquires;
39636 +atomic_unchecked_t fscache_n_acquires_null;
39637 +atomic_unchecked_t fscache_n_acquires_no_cache;
39638 +atomic_unchecked_t fscache_n_acquires_ok;
39639 +atomic_unchecked_t fscache_n_acquires_nobufs;
39640 +atomic_unchecked_t fscache_n_acquires_oom;
39641 +
39642 +atomic_unchecked_t fscache_n_updates;
39643 +atomic_unchecked_t fscache_n_updates_null;
39644 +atomic_unchecked_t fscache_n_updates_run;
39645 +
39646 +atomic_unchecked_t fscache_n_relinquishes;
39647 +atomic_unchecked_t fscache_n_relinquishes_null;
39648 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39649 +atomic_unchecked_t fscache_n_relinquishes_retire;
39650 +
39651 +atomic_unchecked_t fscache_n_cookie_index;
39652 +atomic_unchecked_t fscache_n_cookie_data;
39653 +atomic_unchecked_t fscache_n_cookie_special;
39654 +
39655 +atomic_unchecked_t fscache_n_object_alloc;
39656 +atomic_unchecked_t fscache_n_object_no_alloc;
39657 +atomic_unchecked_t fscache_n_object_lookups;
39658 +atomic_unchecked_t fscache_n_object_lookups_negative;
39659 +atomic_unchecked_t fscache_n_object_lookups_positive;
39660 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39661 +atomic_unchecked_t fscache_n_object_created;
39662 +atomic_unchecked_t fscache_n_object_avail;
39663 +atomic_unchecked_t fscache_n_object_dead;
39664 +
39665 +atomic_unchecked_t fscache_n_checkaux_none;
39666 +atomic_unchecked_t fscache_n_checkaux_okay;
39667 +atomic_unchecked_t fscache_n_checkaux_update;
39668 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39669
39670 atomic_t fscache_n_cop_alloc_object;
39671 atomic_t fscache_n_cop_lookup_object;
39672 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39673 seq_puts(m, "FS-Cache statistics\n");
39674
39675 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39676 - atomic_read(&fscache_n_cookie_index),
39677 - atomic_read(&fscache_n_cookie_data),
39678 - atomic_read(&fscache_n_cookie_special));
39679 + atomic_read_unchecked(&fscache_n_cookie_index),
39680 + atomic_read_unchecked(&fscache_n_cookie_data),
39681 + atomic_read_unchecked(&fscache_n_cookie_special));
39682
39683 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39684 - atomic_read(&fscache_n_object_alloc),
39685 - atomic_read(&fscache_n_object_no_alloc),
39686 - atomic_read(&fscache_n_object_avail),
39687 - atomic_read(&fscache_n_object_dead));
39688 + atomic_read_unchecked(&fscache_n_object_alloc),
39689 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39690 + atomic_read_unchecked(&fscache_n_object_avail),
39691 + atomic_read_unchecked(&fscache_n_object_dead));
39692 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39693 - atomic_read(&fscache_n_checkaux_none),
39694 - atomic_read(&fscache_n_checkaux_okay),
39695 - atomic_read(&fscache_n_checkaux_update),
39696 - atomic_read(&fscache_n_checkaux_obsolete));
39697 + atomic_read_unchecked(&fscache_n_checkaux_none),
39698 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39699 + atomic_read_unchecked(&fscache_n_checkaux_update),
39700 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39701
39702 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39703 - atomic_read(&fscache_n_marks),
39704 - atomic_read(&fscache_n_uncaches));
39705 + atomic_read_unchecked(&fscache_n_marks),
39706 + atomic_read_unchecked(&fscache_n_uncaches));
39707
39708 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39709 " oom=%u\n",
39710 - atomic_read(&fscache_n_acquires),
39711 - atomic_read(&fscache_n_acquires_null),
39712 - atomic_read(&fscache_n_acquires_no_cache),
39713 - atomic_read(&fscache_n_acquires_ok),
39714 - atomic_read(&fscache_n_acquires_nobufs),
39715 - atomic_read(&fscache_n_acquires_oom));
39716 + atomic_read_unchecked(&fscache_n_acquires),
39717 + atomic_read_unchecked(&fscache_n_acquires_null),
39718 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39719 + atomic_read_unchecked(&fscache_n_acquires_ok),
39720 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39721 + atomic_read_unchecked(&fscache_n_acquires_oom));
39722
39723 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39724 - atomic_read(&fscache_n_object_lookups),
39725 - atomic_read(&fscache_n_object_lookups_negative),
39726 - atomic_read(&fscache_n_object_lookups_positive),
39727 - atomic_read(&fscache_n_object_lookups_timed_out),
39728 - atomic_read(&fscache_n_object_created));
39729 + atomic_read_unchecked(&fscache_n_object_lookups),
39730 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39731 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39732 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39733 + atomic_read_unchecked(&fscache_n_object_created));
39734
39735 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39736 - atomic_read(&fscache_n_updates),
39737 - atomic_read(&fscache_n_updates_null),
39738 - atomic_read(&fscache_n_updates_run));
39739 + atomic_read_unchecked(&fscache_n_updates),
39740 + atomic_read_unchecked(&fscache_n_updates_null),
39741 + atomic_read_unchecked(&fscache_n_updates_run));
39742
39743 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39744 - atomic_read(&fscache_n_relinquishes),
39745 - atomic_read(&fscache_n_relinquishes_null),
39746 - atomic_read(&fscache_n_relinquishes_waitcrt),
39747 - atomic_read(&fscache_n_relinquishes_retire));
39748 + atomic_read_unchecked(&fscache_n_relinquishes),
39749 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39750 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39751 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39752
39753 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39754 - atomic_read(&fscache_n_attr_changed),
39755 - atomic_read(&fscache_n_attr_changed_ok),
39756 - atomic_read(&fscache_n_attr_changed_nobufs),
39757 - atomic_read(&fscache_n_attr_changed_nomem),
39758 - atomic_read(&fscache_n_attr_changed_calls));
39759 + atomic_read_unchecked(&fscache_n_attr_changed),
39760 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
39761 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39762 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39763 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
39764
39765 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39766 - atomic_read(&fscache_n_allocs),
39767 - atomic_read(&fscache_n_allocs_ok),
39768 - atomic_read(&fscache_n_allocs_wait),
39769 - atomic_read(&fscache_n_allocs_nobufs),
39770 - atomic_read(&fscache_n_allocs_intr));
39771 + atomic_read_unchecked(&fscache_n_allocs),
39772 + atomic_read_unchecked(&fscache_n_allocs_ok),
39773 + atomic_read_unchecked(&fscache_n_allocs_wait),
39774 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
39775 + atomic_read_unchecked(&fscache_n_allocs_intr));
39776 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39777 - atomic_read(&fscache_n_alloc_ops),
39778 - atomic_read(&fscache_n_alloc_op_waits),
39779 - atomic_read(&fscache_n_allocs_object_dead));
39780 + atomic_read_unchecked(&fscache_n_alloc_ops),
39781 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
39782 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
39783
39784 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39785 " int=%u oom=%u\n",
39786 - atomic_read(&fscache_n_retrievals),
39787 - atomic_read(&fscache_n_retrievals_ok),
39788 - atomic_read(&fscache_n_retrievals_wait),
39789 - atomic_read(&fscache_n_retrievals_nodata),
39790 - atomic_read(&fscache_n_retrievals_nobufs),
39791 - atomic_read(&fscache_n_retrievals_intr),
39792 - atomic_read(&fscache_n_retrievals_nomem));
39793 + atomic_read_unchecked(&fscache_n_retrievals),
39794 + atomic_read_unchecked(&fscache_n_retrievals_ok),
39795 + atomic_read_unchecked(&fscache_n_retrievals_wait),
39796 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
39797 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39798 + atomic_read_unchecked(&fscache_n_retrievals_intr),
39799 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
39800 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39801 - atomic_read(&fscache_n_retrieval_ops),
39802 - atomic_read(&fscache_n_retrieval_op_waits),
39803 - atomic_read(&fscache_n_retrievals_object_dead));
39804 + atomic_read_unchecked(&fscache_n_retrieval_ops),
39805 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39806 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39807
39808 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39809 - atomic_read(&fscache_n_stores),
39810 - atomic_read(&fscache_n_stores_ok),
39811 - atomic_read(&fscache_n_stores_again),
39812 - atomic_read(&fscache_n_stores_nobufs),
39813 - atomic_read(&fscache_n_stores_oom));
39814 + atomic_read_unchecked(&fscache_n_stores),
39815 + atomic_read_unchecked(&fscache_n_stores_ok),
39816 + atomic_read_unchecked(&fscache_n_stores_again),
39817 + atomic_read_unchecked(&fscache_n_stores_nobufs),
39818 + atomic_read_unchecked(&fscache_n_stores_oom));
39819 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39820 - atomic_read(&fscache_n_store_ops),
39821 - atomic_read(&fscache_n_store_calls),
39822 - atomic_read(&fscache_n_store_pages),
39823 - atomic_read(&fscache_n_store_radix_deletes),
39824 - atomic_read(&fscache_n_store_pages_over_limit));
39825 + atomic_read_unchecked(&fscache_n_store_ops),
39826 + atomic_read_unchecked(&fscache_n_store_calls),
39827 + atomic_read_unchecked(&fscache_n_store_pages),
39828 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
39829 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39830
39831 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39832 - atomic_read(&fscache_n_store_vmscan_not_storing),
39833 - atomic_read(&fscache_n_store_vmscan_gone),
39834 - atomic_read(&fscache_n_store_vmscan_busy),
39835 - atomic_read(&fscache_n_store_vmscan_cancelled));
39836 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39837 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39838 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39839 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39840
39841 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39842 - atomic_read(&fscache_n_op_pend),
39843 - atomic_read(&fscache_n_op_run),
39844 - atomic_read(&fscache_n_op_enqueue),
39845 - atomic_read(&fscache_n_op_cancelled),
39846 - atomic_read(&fscache_n_op_rejected));
39847 + atomic_read_unchecked(&fscache_n_op_pend),
39848 + atomic_read_unchecked(&fscache_n_op_run),
39849 + atomic_read_unchecked(&fscache_n_op_enqueue),
39850 + atomic_read_unchecked(&fscache_n_op_cancelled),
39851 + atomic_read_unchecked(&fscache_n_op_rejected));
39852 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39853 - atomic_read(&fscache_n_op_deferred_release),
39854 - atomic_read(&fscache_n_op_release),
39855 - atomic_read(&fscache_n_op_gc));
39856 + atomic_read_unchecked(&fscache_n_op_deferred_release),
39857 + atomic_read_unchecked(&fscache_n_op_release),
39858 + atomic_read_unchecked(&fscache_n_op_gc));
39859
39860 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39861 atomic_read(&fscache_n_cop_alloc_object),
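Editor's note: the fs/fscache/stats.c hunk above is one instance of a pattern repeated throughout this part of the patch (see also fs/lockd/clntproc.c, fs/notify/notification.c, fs/ocfs2/ocfs2.h, and the atomic_long variant in fs/nfs/inode.c below): counters that are purely statistical are switched from atomic_t to atomic_unchecked_t and read with atomic_read_unchecked(), exempting them from PaX's reference-counter overflow protection while real refcounts stay checked. The real atomic_unchecked_t is defined in the per-architecture atomic headers elsewhere in this patch; the userspace sketch below only illustrates the wrapping idea and is not part of the patch.

#include <stdio.h>

/* Illustrative analogue only -- the in-kernel type mirrors atomic_t's layout. */
typedef struct { volatile int counter; } atomic_unchecked_t;

#define ATOMIC_INIT(i) { (i) }

/* Plain increment: no saturation/overflow instrumentation is applied here,
 * which is exactly why only harmless statistics counters use this type. */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

static atomic_unchecked_t n_lookups = ATOMIC_INIT(0);

int main(void)
{
        atomic_inc_unchecked(&n_lookups);
        printf("lookups=%d\n", atomic_read_unchecked(&n_lookups));
        return 0;
}
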
39862 diff -urNp linux-2.6.32.41/fs/fs_struct.c linux-2.6.32.41/fs/fs_struct.c
39863 --- linux-2.6.32.41/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39864 +++ linux-2.6.32.41/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39865 @@ -4,6 +4,7 @@
39866 #include <linux/path.h>
39867 #include <linux/slab.h>
39868 #include <linux/fs_struct.h>
39869 +#include <linux/grsecurity.h>
39870
39871 /*
39872 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39873 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39874 old_root = fs->root;
39875 fs->root = *path;
39876 path_get(path);
39877 + gr_set_chroot_entries(current, path);
39878 write_unlock(&fs->lock);
39879 if (old_root.dentry)
39880 path_put(&old_root);
39881 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39882 && fs->root.mnt == old_root->mnt) {
39883 path_get(new_root);
39884 fs->root = *new_root;
39885 + gr_set_chroot_entries(p, new_root);
39886 count++;
39887 }
39888 if (fs->pwd.dentry == old_root->dentry
39889 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39890 task_lock(tsk);
39891 write_lock(&fs->lock);
39892 tsk->fs = NULL;
39893 - kill = !--fs->users;
39894 + gr_clear_chroot_entries(tsk);
39895 + kill = !atomic_dec_return(&fs->users);
39896 write_unlock(&fs->lock);
39897 task_unlock(tsk);
39898 if (kill)
39899 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
39900 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
39901 /* We don't need to lock fs - think why ;-) */
39902 if (fs) {
39903 - fs->users = 1;
39904 + atomic_set(&fs->users, 1);
39905 fs->in_exec = 0;
39906 rwlock_init(&fs->lock);
39907 fs->umask = old->umask;
39908 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
39909
39910 task_lock(current);
39911 write_lock(&fs->lock);
39912 - kill = !--fs->users;
39913 + kill = !atomic_dec_return(&fs->users);
39914 current->fs = new_fs;
39915 + gr_set_chroot_entries(current, &new_fs->root);
39916 write_unlock(&fs->lock);
39917 task_unlock(current);
39918
39919 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
39920
39921 /* to be mentioned only in INIT_TASK */
39922 struct fs_struct init_fs = {
39923 - .users = 1,
39924 + .users = ATOMIC_INIT(1),
39925 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
39926 .umask = 0022,
39927 };
39928 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
39929 task_lock(current);
39930
39931 write_lock(&init_fs.lock);
39932 - init_fs.users++;
39933 + atomic_inc(&init_fs.users);
39934 write_unlock(&init_fs.lock);
39935
39936 write_lock(&fs->lock);
39937 current->fs = &init_fs;
39938 - kill = !--fs->users;
39939 + gr_set_chroot_entries(current, &current->fs->root);
39940 + kill = !atomic_dec_return(&fs->users);
39941 write_unlock(&fs->lock);
39942
39943 task_unlock(current);
39944 diff -urNp linux-2.6.32.41/fs/fuse/cuse.c linux-2.6.32.41/fs/fuse/cuse.c
39945 --- linux-2.6.32.41/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
39946 +++ linux-2.6.32.41/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
39947 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
39948 return rc;
39949 }
39950
39951 -static struct file_operations cuse_channel_fops; /* initialized during init */
39952 -
39953 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
39954 + .owner = THIS_MODULE,
39955 + .llseek = no_llseek,
39956 + .read = do_sync_read,
39957 + .aio_read = fuse_dev_read,
39958 + .write = do_sync_write,
39959 + .aio_write = fuse_dev_write,
39960 + .poll = fuse_dev_poll,
39961 + .open = cuse_channel_open,
39962 + .release = cuse_channel_release,
39963 + .fasync = fuse_dev_fasync,
39964 +};
39965
39966 /**************************************************************************
39967 * Misc stuff and module initializatiion
39968 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
39969 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
39970 INIT_LIST_HEAD(&cuse_conntbl[i]);
39971
39972 - /* inherit and extend fuse_dev_operations */
39973 - cuse_channel_fops = fuse_dev_operations;
39974 - cuse_channel_fops.owner = THIS_MODULE;
39975 - cuse_channel_fops.open = cuse_channel_open;
39976 - cuse_channel_fops.release = cuse_channel_release;
39977 -
39978 cuse_class = class_create(THIS_MODULE, "cuse");
39979 if (IS_ERR(cuse_class))
39980 return PTR_ERR(cuse_class);
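Editor's note: the cuse.c hunk above replaces a file_operations struct that used to be filled in at runtime (by copying fuse_dev_operations and patching a few members in cuse_init()) with a fully spelled-out const initializer; the fs/fuse/dev.c and fuse_i.h hunks that follow un-static and export the fuse handlers it now names directly. Constifying such operation tables, here and in the sysfs_ops, kset_uevent_ops, nlmsvc_binding and fsnotify_ops hunks further down, lets the table live in read-only memory so its function pointers cannot be silently overwritten at runtime. Below is a minimal, self-contained sketch of a const function-pointer table with designated initializers; the names are made up for illustration and are not the kernel API.

#include <stdio.h>

/* Hypothetical ops table -- stands in for struct file_operations. */
struct demo_ops {
        int  (*open)(const char *name);
        void (*release)(void);
};

static int  demo_open(const char *name) { printf("open %s\n", name); return 0; }
static void demo_release(void)          { printf("release\n"); }

/* const + static initializer: the table can be placed in .rodata, so the
 * pointers cannot be redirected later (compare the runtime assignments
 * the cuse.c hunk removes from cuse_init()). */
static const struct demo_ops demo_channel_ops = {
        .open    = demo_open,
        .release = demo_release,
};

int main(void)
{
        demo_channel_ops.open("example");
        demo_channel_ops.release();
        return 0;
}
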
39981 diff -urNp linux-2.6.32.41/fs/fuse/dev.c linux-2.6.32.41/fs/fuse/dev.c
39982 --- linux-2.6.32.41/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
39983 +++ linux-2.6.32.41/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
39984 @@ -745,7 +745,7 @@ __releases(&fc->lock)
39985 * request_end(). Otherwise add it to the processing list, and set
39986 * the 'sent' flag.
39987 */
39988 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39989 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39990 unsigned long nr_segs, loff_t pos)
39991 {
39992 int err;
39993 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
39994 spin_unlock(&fc->lock);
39995 return err;
39996 }
39997 +EXPORT_SYMBOL_GPL(fuse_dev_read);
39998
39999 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40000 struct fuse_copy_state *cs)
40001 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40002 {
40003 struct fuse_notify_inval_entry_out outarg;
40004 int err = -EINVAL;
40005 - char buf[FUSE_NAME_MAX+1];
40006 + char *buf = NULL;
40007 struct qstr name;
40008
40009 if (size < sizeof(outarg))
40010 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40011 if (outarg.namelen > FUSE_NAME_MAX)
40012 goto err;
40013
40014 + err = -ENOMEM;
40015 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40016 + if (!buf)
40017 + goto err;
40018 +
40019 name.name = buf;
40020 name.len = outarg.namelen;
40021 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40022 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40023
40024 down_read(&fc->killsb);
40025 err = -ENOENT;
40026 - if (!fc->sb)
40027 - goto err_unlock;
40028 -
40029 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40030 -
40031 -err_unlock:
40032 + if (fc->sb)
40033 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40034 up_read(&fc->killsb);
40035 + kfree(buf);
40036 return err;
40037
40038 err:
40039 fuse_copy_finish(cs);
40040 + kfree(buf);
40041 return err;
40042 }
40043
40044 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40045 * it from the list and copy the rest of the buffer to the request.
40046 * The request is finished by calling request_end()
40047 */
40048 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40049 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40050 unsigned long nr_segs, loff_t pos)
40051 {
40052 int err;
40053 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40054 fuse_copy_finish(&cs);
40055 return err;
40056 }
40057 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40058
40059 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40060 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40061 {
40062 unsigned mask = POLLOUT | POLLWRNORM;
40063 struct fuse_conn *fc = fuse_get_conn(file);
40064 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40065
40066 return mask;
40067 }
40068 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40069
40070 /*
40071 * Abort all requests on the given list (pending or processing)
40072 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40073 }
40074 EXPORT_SYMBOL_GPL(fuse_dev_release);
40075
40076 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40077 +int fuse_dev_fasync(int fd, struct file *file, int on)
40078 {
40079 struct fuse_conn *fc = fuse_get_conn(file);
40080 if (!fc)
40081 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40082 /* No locking - fasync_helper does its own locking */
40083 return fasync_helper(fd, file, on, &fc->fasync);
40084 }
40085 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40086
40087 const struct file_operations fuse_dev_operations = {
40088 .owner = THIS_MODULE,
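Editor's note: besides exporting the fuse_dev_* handlers for cuse, the dev.c hunks above replace the roughly 1 KiB on-stack name buffer in fuse_notify_inval_entry() (char buf[FUSE_NAME_MAX+1]) with a kmalloc()'d buffer that is freed on every exit path, trimming the function's kernel-stack footprint in line with the stack-usage theme of the rest of the patch. A small userspace sketch of the same allocate-then-single-cleanup shape follows; DEMO_NAME_MAX is a stand-in for FUSE_NAME_MAX and the helper is hypothetical.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_NAME_MAX 1024   /* stand-in for FUSE_NAME_MAX */

static int handle_name(const char *src, size_t namelen)
{
        char *buf = NULL;
        int err = -EINVAL;

        if (namelen > DEMO_NAME_MAX)
                goto out;            /* reject before allocating */

        err = -ENOMEM;
        buf = malloc(DEMO_NAME_MAX + 1);   /* was a large on-stack array */
        if (!buf)
                goto out;

        memcpy(buf, src, namelen);
        buf[namelen] = '\0';
        /* ... act on the copied name ... */
        err = 0;
out:
        free(buf);                   /* free(NULL) is defined to be a no-op */
        return err;
}

int main(void)
{
        return handle_name("example", 7) ? 1 : 0;
}
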
40089 diff -urNp linux-2.6.32.41/fs/fuse/dir.c linux-2.6.32.41/fs/fuse/dir.c
40090 --- linux-2.6.32.41/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40091 +++ linux-2.6.32.41/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40092 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40093 return link;
40094 }
40095
40096 -static void free_link(char *link)
40097 +static void free_link(const char *link)
40098 {
40099 if (!IS_ERR(link))
40100 free_page((unsigned long) link);
40101 diff -urNp linux-2.6.32.41/fs/fuse/fuse_i.h linux-2.6.32.41/fs/fuse/fuse_i.h
40102 --- linux-2.6.32.41/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40103 +++ linux-2.6.32.41/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40104 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40105
40106 extern const struct dentry_operations fuse_dentry_operations;
40107
40108 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40109 + unsigned long nr_segs, loff_t pos);
40110 +
40111 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40112 + unsigned long nr_segs, loff_t pos);
40113 +
40114 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40115 +
40116 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40117 +
40118 /**
40119 * Inode to nodeid comparison.
40120 */
40121 diff -urNp linux-2.6.32.41/fs/gfs2/ops_inode.c linux-2.6.32.41/fs/gfs2/ops_inode.c
40122 --- linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40123 +++ linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40124 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40125 unsigned int x;
40126 int error;
40127
40128 + pax_track_stack();
40129 +
40130 if (ndentry->d_inode) {
40131 nip = GFS2_I(ndentry->d_inode);
40132 if (ip == nip)
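Editor's note: the two lines added to gfs2_rename() above are the first of many identical insertions in this part of the patch (hfsplus, jbd, jffs2, lockd, ncpfs, nfsd, ocfs2 and the renameat() path in namei.c receive the same treatment): pax_track_stack() is a PaX helper, defined elsewhere in this patch, called at the top of functions with unusually large stack frames so the depth of kernel-stack usage can be tracked as part of PaX's stack tracking/sanitization support. The stubbed sketch below only shows where the call is placed relative to the big on-stack object; the struct and its sizes are hypothetical.

#include <string.h>

/* Stub -- the real helper is supplied by PaX elsewhere in the patch. */
static inline void pax_track_stack(void) { }

/* Hypothetical lookup state, large enough to matter on an 8 KiB kernel stack. */
struct demo_find_data {
        char search_key[512];
        char record[1024];
};

static int demo_lookup(const char *key)
{
        struct demo_find_data fd;    /* the large on-stack object */
        int err = 0;

        pax_track_stack();           /* inserted right after the declarations */

        memset(&fd, 0, sizeof(fd));
        strncpy(fd.search_key, key, sizeof(fd.search_key) - 1);
        /* ... perform the lookup using fd ... */
        return err;
}

int main(void)
{
        return demo_lookup("example");
}
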
40133 diff -urNp linux-2.6.32.41/fs/gfs2/sys.c linux-2.6.32.41/fs/gfs2/sys.c
40134 --- linux-2.6.32.41/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40135 +++ linux-2.6.32.41/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40136 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40137 return a->store ? a->store(sdp, buf, len) : len;
40138 }
40139
40140 -static struct sysfs_ops gfs2_attr_ops = {
40141 +static const struct sysfs_ops gfs2_attr_ops = {
40142 .show = gfs2_attr_show,
40143 .store = gfs2_attr_store,
40144 };
40145 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40146 return 0;
40147 }
40148
40149 -static struct kset_uevent_ops gfs2_uevent_ops = {
40150 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40151 .uevent = gfs2_uevent,
40152 };
40153
40154 diff -urNp linux-2.6.32.41/fs/hfsplus/catalog.c linux-2.6.32.41/fs/hfsplus/catalog.c
40155 --- linux-2.6.32.41/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40156 +++ linux-2.6.32.41/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40157 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40158 int err;
40159 u16 type;
40160
40161 + pax_track_stack();
40162 +
40163 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40164 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40165 if (err)
40166 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40167 int entry_size;
40168 int err;
40169
40170 + pax_track_stack();
40171 +
40172 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40173 sb = dir->i_sb;
40174 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40175 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40176 int entry_size, type;
40177 int err = 0;
40178
40179 + pax_track_stack();
40180 +
40181 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40182 dst_dir->i_ino, dst_name->name);
40183 sb = src_dir->i_sb;
40184 diff -urNp linux-2.6.32.41/fs/hfsplus/dir.c linux-2.6.32.41/fs/hfsplus/dir.c
40185 --- linux-2.6.32.41/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40186 +++ linux-2.6.32.41/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40187 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40188 struct hfsplus_readdir_data *rd;
40189 u16 type;
40190
40191 + pax_track_stack();
40192 +
40193 if (filp->f_pos >= inode->i_size)
40194 return 0;
40195
40196 diff -urNp linux-2.6.32.41/fs/hfsplus/inode.c linux-2.6.32.41/fs/hfsplus/inode.c
40197 --- linux-2.6.32.41/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40198 +++ linux-2.6.32.41/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40199 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40200 int res = 0;
40201 u16 type;
40202
40203 + pax_track_stack();
40204 +
40205 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40206
40207 HFSPLUS_I(inode).dev = 0;
40208 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40209 struct hfs_find_data fd;
40210 hfsplus_cat_entry entry;
40211
40212 + pax_track_stack();
40213 +
40214 if (HFSPLUS_IS_RSRC(inode))
40215 main_inode = HFSPLUS_I(inode).rsrc_inode;
40216
40217 diff -urNp linux-2.6.32.41/fs/hfsplus/ioctl.c linux-2.6.32.41/fs/hfsplus/ioctl.c
40218 --- linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40219 +++ linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40220 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40221 struct hfsplus_cat_file *file;
40222 int res;
40223
40224 + pax_track_stack();
40225 +
40226 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40227 return -EOPNOTSUPP;
40228
40229 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40230 struct hfsplus_cat_file *file;
40231 ssize_t res = 0;
40232
40233 + pax_track_stack();
40234 +
40235 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40236 return -EOPNOTSUPP;
40237
40238 diff -urNp linux-2.6.32.41/fs/hfsplus/super.c linux-2.6.32.41/fs/hfsplus/super.c
40239 --- linux-2.6.32.41/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40240 +++ linux-2.6.32.41/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40241 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40242 struct nls_table *nls = NULL;
40243 int err = -EINVAL;
40244
40245 + pax_track_stack();
40246 +
40247 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40248 if (!sbi)
40249 return -ENOMEM;
40250 diff -urNp linux-2.6.32.41/fs/hugetlbfs/inode.c linux-2.6.32.41/fs/hugetlbfs/inode.c
40251 --- linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40252 +++ linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40253 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40254 .kill_sb = kill_litter_super,
40255 };
40256
40257 -static struct vfsmount *hugetlbfs_vfsmount;
40258 +struct vfsmount *hugetlbfs_vfsmount;
40259
40260 static int can_do_hugetlb_shm(void)
40261 {
40262 diff -urNp linux-2.6.32.41/fs/ioctl.c linux-2.6.32.41/fs/ioctl.c
40263 --- linux-2.6.32.41/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40264 +++ linux-2.6.32.41/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40265 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40266 u64 phys, u64 len, u32 flags)
40267 {
40268 struct fiemap_extent extent;
40269 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40270 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40271
40272 /* only count the extents */
40273 if (fieinfo->fi_extents_max == 0) {
40274 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40275
40276 fieinfo.fi_flags = fiemap.fm_flags;
40277 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40278 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40279 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40280
40281 if (fiemap.fm_extent_count != 0 &&
40282 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40283 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40284 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40285 fiemap.fm_flags = fieinfo.fi_flags;
40286 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40287 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40288 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40289 error = -EFAULT;
40290
40291 return error;
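Editor's note: the fs/ioctl.c changes above (and the similar casts in fs/nfsd/vfs.c further down) do not change generated code; __user and __force are sparse annotations. fi_extents_start really holds a userspace pointer, so it is declared __user and handed to access_ok()/copy_to_user() as such, and where a kernel buffer is deliberately passed through a __user-typed interface (for example under set_fs(KERNEL_DS) in nfsd), __force tells sparse the address-space mismatch is intentional. The self-contained sketch below shows how the annotations are defined and used; the macro definitions mirror the kernel's compiler.h of this era, everything else is illustrative.

#include <stddef.h>
#include <string.h>

/* Only the sparse checker sees the attributes; plain compilers see nothing. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Toy stand-in for copy_to_user(): takes a __user destination. */
static size_t demo_copy_to_user(void __user *dst, const void *src, size_t n)
{
        memcpy((void __force *)dst, src, n);   /* cast away the address space on purpose */
        return 0;                              /* 0 = nothing left uncopied */
}

struct demo_fiemap { unsigned long start, len; };

static int fill_extent(void __user *uptr)
{
        struct demo_fiemap ext = { .start = 0, .len = 4096 };

        /* Without the __user annotation on uptr, sparse could not flag a
         * direct dereference of what is really a userspace pointer. */
        return demo_copy_to_user(uptr, &ext, sizeof(ext)) ? -1 : 0;
}

int main(void)
{
        struct demo_fiemap out;
        return fill_extent((__force void __user *)&out);
}
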
40292 diff -urNp linux-2.6.32.41/fs/jbd/checkpoint.c linux-2.6.32.41/fs/jbd/checkpoint.c
40293 --- linux-2.6.32.41/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40294 +++ linux-2.6.32.41/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40295 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40296 tid_t this_tid;
40297 int result;
40298
40299 + pax_track_stack();
40300 +
40301 jbd_debug(1, "Start checkpoint\n");
40302
40303 /*
40304 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rtime.c linux-2.6.32.41/fs/jffs2/compr_rtime.c
40305 --- linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40306 +++ linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40307 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40308 int outpos = 0;
40309 int pos=0;
40310
40311 + pax_track_stack();
40312 +
40313 memset(positions,0,sizeof(positions));
40314
40315 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40316 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40317 int outpos = 0;
40318 int pos=0;
40319
40320 + pax_track_stack();
40321 +
40322 memset(positions,0,sizeof(positions));
40323
40324 while (outpos<destlen) {
40325 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rubin.c linux-2.6.32.41/fs/jffs2/compr_rubin.c
40326 --- linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40327 +++ linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40328 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40329 int ret;
40330 uint32_t mysrclen, mydstlen;
40331
40332 + pax_track_stack();
40333 +
40334 mysrclen = *sourcelen;
40335 mydstlen = *dstlen - 8;
40336
40337 diff -urNp linux-2.6.32.41/fs/jffs2/erase.c linux-2.6.32.41/fs/jffs2/erase.c
40338 --- linux-2.6.32.41/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40339 +++ linux-2.6.32.41/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40340 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40341 struct jffs2_unknown_node marker = {
40342 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40343 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40344 - .totlen = cpu_to_je32(c->cleanmarker_size)
40345 + .totlen = cpu_to_je32(c->cleanmarker_size),
40346 + .hdr_crc = cpu_to_je32(0)
40347 };
40348
40349 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40350 diff -urNp linux-2.6.32.41/fs/jffs2/wbuf.c linux-2.6.32.41/fs/jffs2/wbuf.c
40351 --- linux-2.6.32.41/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40352 +++ linux-2.6.32.41/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40353 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40354 {
40355 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40356 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40357 - .totlen = constant_cpu_to_je32(8)
40358 + .totlen = constant_cpu_to_je32(8),
40359 + .hdr_crc = constant_cpu_to_je32(0)
40360 };
40361
40362 /*
40363 diff -urNp linux-2.6.32.41/fs/jffs2/xattr.c linux-2.6.32.41/fs/jffs2/xattr.c
40364 --- linux-2.6.32.41/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40365 +++ linux-2.6.32.41/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40366 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40367
40368 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40369
40370 + pax_track_stack();
40371 +
40372 /* Phase.1 : Merge same xref */
40373 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40374 xref_tmphash[i] = NULL;
40375 diff -urNp linux-2.6.32.41/fs/jfs/super.c linux-2.6.32.41/fs/jfs/super.c
40376 --- linux-2.6.32.41/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40377 +++ linux-2.6.32.41/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40378 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40379
40380 jfs_inode_cachep =
40381 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40382 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40383 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40384 init_once);
40385 if (jfs_inode_cachep == NULL)
40386 return -ENOMEM;
40387 diff -urNp linux-2.6.32.41/fs/Kconfig.binfmt linux-2.6.32.41/fs/Kconfig.binfmt
40388 --- linux-2.6.32.41/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40389 +++ linux-2.6.32.41/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40390 @@ -86,7 +86,7 @@ config HAVE_AOUT
40391
40392 config BINFMT_AOUT
40393 tristate "Kernel support for a.out and ECOFF binaries"
40394 - depends on HAVE_AOUT
40395 + depends on HAVE_AOUT && BROKEN
40396 ---help---
40397 A.out (Assembler.OUTput) is a set of formats for libraries and
40398 executables used in the earliest versions of UNIX. Linux used
40399 diff -urNp linux-2.6.32.41/fs/libfs.c linux-2.6.32.41/fs/libfs.c
40400 --- linux-2.6.32.41/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40401 +++ linux-2.6.32.41/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40402 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40403
40404 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40405 struct dentry *next;
40406 + char d_name[sizeof(next->d_iname)];
40407 + const unsigned char *name;
40408 +
40409 next = list_entry(p, struct dentry, d_u.d_child);
40410 if (d_unhashed(next) || !next->d_inode)
40411 continue;
40412
40413 spin_unlock(&dcache_lock);
40414 - if (filldir(dirent, next->d_name.name,
40415 + name = next->d_name.name;
40416 + if (name == next->d_iname) {
40417 + memcpy(d_name, name, next->d_name.len);
40418 + name = d_name;
40419 + }
40420 + if (filldir(dirent, name,
40421 next->d_name.len, filp->f_pos,
40422 next->d_inode->i_ino,
40423 dt_type(next->d_inode)) < 0)
40424 diff -urNp linux-2.6.32.41/fs/lockd/clntproc.c linux-2.6.32.41/fs/lockd/clntproc.c
40425 --- linux-2.6.32.41/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40426 +++ linux-2.6.32.41/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40427 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40428 /*
40429 * Cookie counter for NLM requests
40430 */
40431 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40432 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40433
40434 void nlmclnt_next_cookie(struct nlm_cookie *c)
40435 {
40436 - u32 cookie = atomic_inc_return(&nlm_cookie);
40437 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40438
40439 memcpy(c->data, &cookie, 4);
40440 c->len=4;
40441 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40442 struct nlm_rqst reqst, *req;
40443 int status;
40444
40445 + pax_track_stack();
40446 +
40447 req = &reqst;
40448 memset(req, 0, sizeof(*req));
40449 locks_init_lock(&req->a_args.lock.fl);
40450 diff -urNp linux-2.6.32.41/fs/lockd/svc.c linux-2.6.32.41/fs/lockd/svc.c
40451 --- linux-2.6.32.41/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40452 +++ linux-2.6.32.41/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40453 @@ -43,7 +43,7 @@
40454
40455 static struct svc_program nlmsvc_program;
40456
40457 -struct nlmsvc_binding * nlmsvc_ops;
40458 +const struct nlmsvc_binding * nlmsvc_ops;
40459 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40460
40461 static DEFINE_MUTEX(nlmsvc_mutex);
40462 diff -urNp linux-2.6.32.41/fs/locks.c linux-2.6.32.41/fs/locks.c
40463 --- linux-2.6.32.41/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40464 +++ linux-2.6.32.41/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40465 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40466 return;
40467
40468 if (filp->f_op && filp->f_op->flock) {
40469 - struct file_lock fl = {
40470 + struct file_lock flock = {
40471 .fl_pid = current->tgid,
40472 .fl_file = filp,
40473 .fl_flags = FL_FLOCK,
40474 .fl_type = F_UNLCK,
40475 .fl_end = OFFSET_MAX,
40476 };
40477 - filp->f_op->flock(filp, F_SETLKW, &fl);
40478 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40479 - fl.fl_ops->fl_release_private(&fl);
40480 + filp->f_op->flock(filp, F_SETLKW, &flock);
40481 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40482 + flock.fl_ops->fl_release_private(&flock);
40483 }
40484
40485 lock_kernel();
40486 diff -urNp linux-2.6.32.41/fs/namei.c linux-2.6.32.41/fs/namei.c
40487 --- linux-2.6.32.41/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40488 +++ linux-2.6.32.41/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40489 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40490 return ret;
40491
40492 /*
40493 - * Read/write DACs are always overridable.
40494 - * Executable DACs are overridable if at least one exec bit is set.
40495 - */
40496 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40497 - if (capable(CAP_DAC_OVERRIDE))
40498 - return 0;
40499 -
40500 - /*
40501 * Searching includes executable on directories, else just read.
40502 */
40503 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40504 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40505 if (capable(CAP_DAC_READ_SEARCH))
40506 return 0;
40507
40508 + /*
40509 + * Read/write DACs are always overridable.
40510 + * Executable DACs are overridable if at least one exec bit is set.
40511 + */
40512 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40513 + if (capable(CAP_DAC_OVERRIDE))
40514 + return 0;
40515 +
40516 return -EACCES;
40517 }
40518
40519 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40520 if (!ret)
40521 goto ok;
40522
40523 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40524 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40525 + capable(CAP_DAC_OVERRIDE))
40526 goto ok;
40527
40528 return ret;
40529 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40530 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40531 error = PTR_ERR(cookie);
40532 if (!IS_ERR(cookie)) {
40533 - char *s = nd_get_link(nd);
40534 + const char *s = nd_get_link(nd);
40535 error = 0;
40536 if (s)
40537 error = __vfs_follow_link(nd, s);
40538 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40539 err = security_inode_follow_link(path->dentry, nd);
40540 if (err)
40541 goto loop;
40542 +
40543 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40544 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40545 + err = -EACCES;
40546 + goto loop;
40547 + }
40548 +
40549 current->link_count++;
40550 current->total_link_count++;
40551 nd->depth++;
40552 @@ -1016,11 +1024,18 @@ return_reval:
40553 break;
40554 }
40555 return_base:
40556 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40557 + path_put(&nd->path);
40558 + return -ENOENT;
40559 + }
40560 return 0;
40561 out_dput:
40562 path_put_conditional(&next, nd);
40563 break;
40564 }
40565 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40566 + err = -ENOENT;
40567 +
40568 path_put(&nd->path);
40569 return_err:
40570 return err;
40571 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40572 int retval = path_init(dfd, name, flags, nd);
40573 if (!retval)
40574 retval = path_walk(name, nd);
40575 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40576 - nd->path.dentry->d_inode))
40577 - audit_inode(name, nd->path.dentry);
40578 +
40579 + if (likely(!retval)) {
40580 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40581 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40582 + retval = -ENOENT;
40583 + if (!audit_dummy_context())
40584 + audit_inode(name, nd->path.dentry);
40585 + }
40586 + }
40587 if (nd->root.mnt) {
40588 path_put(&nd->root);
40589 nd->root.mnt = NULL;
40590 }
40591 +
40592 return retval;
40593 }
40594
40595 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40596 if (error)
40597 goto err_out;
40598
40599 +
40600 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40601 + error = -EPERM;
40602 + goto err_out;
40603 + }
40604 + if (gr_handle_rawio(inode)) {
40605 + error = -EPERM;
40606 + goto err_out;
40607 + }
40608 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40609 + error = -EACCES;
40610 + goto err_out;
40611 + }
40612 +
40613 if (flag & O_TRUNC) {
40614 error = get_write_access(inode);
40615 if (error)
40616 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40617 int error;
40618 struct dentry *dir = nd->path.dentry;
40619
40620 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40621 + error = -EACCES;
40622 + goto out_unlock;
40623 + }
40624 +
40625 if (!IS_POSIXACL(dir->d_inode))
40626 mode &= ~current_umask();
40627 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40628 if (error)
40629 goto out_unlock;
40630 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40631 + if (!error)
40632 + gr_handle_create(path->dentry, nd->path.mnt);
40633 out_unlock:
40634 mutex_unlock(&dir->d_inode->i_mutex);
40635 dput(nd->path.dentry);
40636 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40637 &nd, flag);
40638 if (error)
40639 return ERR_PTR(error);
40640 +
40641 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40642 + error = -EPERM;
40643 + goto exit;
40644 + }
40645 +
40646 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40647 + error = -EPERM;
40648 + goto exit;
40649 + }
40650 +
40651 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40652 + error = -EACCES;
40653 + goto exit;
40654 + }
40655 +
40656 goto ok;
40657 }
40658
40659 @@ -1795,6 +1854,14 @@ do_last:
40660 /*
40661 * It already exists.
40662 */
40663 +
40664 + /* only check if O_CREAT is specified, all other checks need
40665 + to go into may_open */
40666 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40667 + error = -EACCES;
40668 + goto exit_mutex_unlock;
40669 + }
40670 +
40671 mutex_unlock(&dir->d_inode->i_mutex);
40672 audit_inode(pathname, path.dentry);
40673
40674 @@ -1887,6 +1954,13 @@ do_link:
40675 error = security_inode_follow_link(path.dentry, &nd);
40676 if (error)
40677 goto exit_dput;
40678 +
40679 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40680 + path.dentry, nd.path.mnt)) {
40681 + error = -EACCES;
40682 + goto exit_dput;
40683 + }
40684 +
40685 error = __do_follow_link(&path, &nd);
40686 if (error) {
40687 /* Does someone understand code flow here? Or it is only
40688 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40689 error = may_mknod(mode);
40690 if (error)
40691 goto out_dput;
40692 +
40693 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40694 + error = -EPERM;
40695 + goto out_dput;
40696 + }
40697 +
40698 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40699 + error = -EACCES;
40700 + goto out_dput;
40701 + }
40702 +
40703 error = mnt_want_write(nd.path.mnt);
40704 if (error)
40705 goto out_dput;
40706 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40707 }
40708 out_drop_write:
40709 mnt_drop_write(nd.path.mnt);
40710 +
40711 + if (!error)
40712 + gr_handle_create(dentry, nd.path.mnt);
40713 out_dput:
40714 dput(dentry);
40715 out_unlock:
40716 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40717 if (IS_ERR(dentry))
40718 goto out_unlock;
40719
40720 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40721 + error = -EACCES;
40722 + goto out_dput;
40723 + }
40724 +
40725 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40726 mode &= ~current_umask();
40727 error = mnt_want_write(nd.path.mnt);
40728 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40729 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40730 out_drop_write:
40731 mnt_drop_write(nd.path.mnt);
40732 +
40733 + if (!error)
40734 + gr_handle_create(dentry, nd.path.mnt);
40735 +
40736 out_dput:
40737 dput(dentry);
40738 out_unlock:
40739 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40740 char * name;
40741 struct dentry *dentry;
40742 struct nameidata nd;
40743 + ino_t saved_ino = 0;
40744 + dev_t saved_dev = 0;
40745
40746 error = user_path_parent(dfd, pathname, &nd, &name);
40747 if (error)
40748 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40749 error = PTR_ERR(dentry);
40750 if (IS_ERR(dentry))
40751 goto exit2;
40752 +
40753 + if (dentry->d_inode != NULL) {
40754 + if (dentry->d_inode->i_nlink <= 1) {
40755 + saved_ino = dentry->d_inode->i_ino;
40756 + saved_dev = gr_get_dev_from_dentry(dentry);
40757 + }
40758 +
40759 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40760 + error = -EACCES;
40761 + goto exit3;
40762 + }
40763 + }
40764 +
40765 error = mnt_want_write(nd.path.mnt);
40766 if (error)
40767 goto exit3;
40768 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40769 if (error)
40770 goto exit4;
40771 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40772 + if (!error && (saved_dev || saved_ino))
40773 + gr_handle_delete(saved_ino, saved_dev);
40774 exit4:
40775 mnt_drop_write(nd.path.mnt);
40776 exit3:
40777 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40778 struct dentry *dentry;
40779 struct nameidata nd;
40780 struct inode *inode = NULL;
40781 + ino_t saved_ino = 0;
40782 + dev_t saved_dev = 0;
40783
40784 error = user_path_parent(dfd, pathname, &nd, &name);
40785 if (error)
40786 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40787 if (nd.last.name[nd.last.len])
40788 goto slashes;
40789 inode = dentry->d_inode;
40790 - if (inode)
40791 + if (inode) {
40792 + if (inode->i_nlink <= 1) {
40793 + saved_ino = inode->i_ino;
40794 + saved_dev = gr_get_dev_from_dentry(dentry);
40795 + }
40796 +
40797 atomic_inc(&inode->i_count);
40798 +
40799 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40800 + error = -EACCES;
40801 + goto exit2;
40802 + }
40803 + }
40804 error = mnt_want_write(nd.path.mnt);
40805 if (error)
40806 goto exit2;
40807 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40808 if (error)
40809 goto exit3;
40810 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40811 + if (!error && (saved_ino || saved_dev))
40812 + gr_handle_delete(saved_ino, saved_dev);
40813 exit3:
40814 mnt_drop_write(nd.path.mnt);
40815 exit2:
40816 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40817 if (IS_ERR(dentry))
40818 goto out_unlock;
40819
40820 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40821 + error = -EACCES;
40822 + goto out_dput;
40823 + }
40824 +
40825 error = mnt_want_write(nd.path.mnt);
40826 if (error)
40827 goto out_dput;
40828 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40829 if (error)
40830 goto out_drop_write;
40831 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40832 + if (!error)
40833 + gr_handle_create(dentry, nd.path.mnt);
40834 out_drop_write:
40835 mnt_drop_write(nd.path.mnt);
40836 out_dput:
40837 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40838 error = PTR_ERR(new_dentry);
40839 if (IS_ERR(new_dentry))
40840 goto out_unlock;
40841 +
40842 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40843 + old_path.dentry->d_inode,
40844 + old_path.dentry->d_inode->i_mode, to)) {
40845 + error = -EACCES;
40846 + goto out_dput;
40847 + }
40848 +
40849 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40850 + old_path.dentry, old_path.mnt, to)) {
40851 + error = -EACCES;
40852 + goto out_dput;
40853 + }
40854 +
40855 error = mnt_want_write(nd.path.mnt);
40856 if (error)
40857 goto out_dput;
40858 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40859 if (error)
40860 goto out_drop_write;
40861 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40862 + if (!error)
40863 + gr_handle_create(new_dentry, nd.path.mnt);
40864 out_drop_write:
40865 mnt_drop_write(nd.path.mnt);
40866 out_dput:
40867 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40868 char *to;
40869 int error;
40870
40871 + pax_track_stack();
40872 +
40873 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40874 if (error)
40875 goto exit;
40876 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40877 if (new_dentry == trap)
40878 goto exit5;
40879
40880 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40881 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
40882 + to);
40883 + if (error)
40884 + goto exit5;
40885 +
40886 error = mnt_want_write(oldnd.path.mnt);
40887 if (error)
40888 goto exit5;
40889 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40890 goto exit6;
40891 error = vfs_rename(old_dir->d_inode, old_dentry,
40892 new_dir->d_inode, new_dentry);
40893 + if (!error)
40894 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
40895 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
40896 exit6:
40897 mnt_drop_write(oldnd.path.mnt);
40898 exit5:
40899 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
40900
40901 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
40902 {
40903 + char tmpbuf[64];
40904 + const char *newlink;
40905 int len;
40906
40907 len = PTR_ERR(link);
40908 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
40909 len = strlen(link);
40910 if (len > (unsigned) buflen)
40911 len = buflen;
40912 - if (copy_to_user(buffer, link, len))
40913 +
40914 + if (len < sizeof(tmpbuf)) {
40915 + memcpy(tmpbuf, link, len);
40916 + newlink = tmpbuf;
40917 + } else
40918 + newlink = link;
40919 +
40920 + if (copy_to_user(buffer, newlink, len))
40921 len = -EFAULT;
40922 out:
40923 return len;
40924 diff -urNp linux-2.6.32.41/fs/namespace.c linux-2.6.32.41/fs/namespace.c
40925 --- linux-2.6.32.41/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
40926 +++ linux-2.6.32.41/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
40927 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
40928 if (!(sb->s_flags & MS_RDONLY))
40929 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
40930 up_write(&sb->s_umount);
40931 +
40932 + gr_log_remount(mnt->mnt_devname, retval);
40933 +
40934 return retval;
40935 }
40936
40937 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
40938 security_sb_umount_busy(mnt);
40939 up_write(&namespace_sem);
40940 release_mounts(&umount_list);
40941 +
40942 + gr_log_unmount(mnt->mnt_devname, retval);
40943 +
40944 return retval;
40945 }
40946
40947 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
40948 if (retval)
40949 goto dput_out;
40950
40951 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
40952 + retval = -EPERM;
40953 + goto dput_out;
40954 + }
40955 +
40956 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
40957 + retval = -EPERM;
40958 + goto dput_out;
40959 + }
40960 +
40961 if (flags & MS_REMOUNT)
40962 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
40963 data_page);
40964 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
40965 dev_name, data_page);
40966 dput_out:
40967 path_put(&path);
40968 +
40969 + gr_log_mount(dev_name, dir_name, retval);
40970 +
40971 return retval;
40972 }
40973
40974 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
40975 goto out1;
40976 }
40977
40978 + if (gr_handle_chroot_pivot()) {
40979 + error = -EPERM;
40980 + path_put(&old);
40981 + goto out1;
40982 + }
40983 +
40984 read_lock(&current->fs->lock);
40985 root = current->fs->root;
40986 path_get(&current->fs->root);
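Editor's note: the large fs/namei.c and fs/namespace.c hunks above all follow one shape: a grsecurity policy check (gr_acl_handle_*, gr_handle_*) runs before the VFS operation and fails it with -EACCES or -EPERM, and a reporting call (gr_handle_create(), gr_log_mount(), ...) runs only after the operation has succeeded. The self-contained sketch below illustrates that check-before/record-after shape with made-up helper names; it is not the grsecurity API itself.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Made-up policy engine standing in for the gr_* calls in the hunks above. */
static bool policy_allows(const char *op, const char *path)
{
        (void)op; (void)path;
        return true;                 /* a real engine would consult its ACLs */
}

static void policy_record(const char *op, const char *path)
{
        printf("audit: %s %s\n", op, path);
}

static int real_mkdir(const char *path) { (void)path; return 0; }

static int hooked_mkdir(const char *path)
{
        int error;

        if (!policy_allows("mkdir", path))
                return -EACCES;      /* denied before the filesystem is touched */

        error = real_mkdir(path);
        if (!error)
                policy_record("mkdir", path);  /* recorded only on success */
        return error;
}

int main(void)
{
        return hooked_mkdir("/tmp/example") ? 1 : 0;
}
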
40987 diff -urNp linux-2.6.32.41/fs/ncpfs/dir.c linux-2.6.32.41/fs/ncpfs/dir.c
40988 --- linux-2.6.32.41/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40989 +++ linux-2.6.32.41/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
40990 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
40991 int res, val = 0, len;
40992 __u8 __name[NCP_MAXPATHLEN + 1];
40993
40994 + pax_track_stack();
40995 +
40996 parent = dget_parent(dentry);
40997 dir = parent->d_inode;
40998
40999 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41000 int error, res, len;
41001 __u8 __name[NCP_MAXPATHLEN + 1];
41002
41003 + pax_track_stack();
41004 +
41005 lock_kernel();
41006 error = -EIO;
41007 if (!ncp_conn_valid(server))
41008 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41009 int error, result, len;
41010 int opmode;
41011 __u8 __name[NCP_MAXPATHLEN + 1];
41012 -
41013 +
41014 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41015 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41016
41017 + pax_track_stack();
41018 +
41019 error = -EIO;
41020 lock_kernel();
41021 if (!ncp_conn_valid(server))
41022 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41023 int error, len;
41024 __u8 __name[NCP_MAXPATHLEN + 1];
41025
41026 + pax_track_stack();
41027 +
41028 DPRINTK("ncp_mkdir: making %s/%s\n",
41029 dentry->d_parent->d_name.name, dentry->d_name.name);
41030
41031 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41032 if (!ncp_conn_valid(server))
41033 goto out;
41034
41035 + pax_track_stack();
41036 +
41037 ncp_age_dentry(server, dentry);
41038 len = sizeof(__name);
41039 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41040 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41041 int old_len, new_len;
41042 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41043
41044 + pax_track_stack();
41045 +
41046 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41047 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41048 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41049 diff -urNp linux-2.6.32.41/fs/ncpfs/inode.c linux-2.6.32.41/fs/ncpfs/inode.c
41050 --- linux-2.6.32.41/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41051 +++ linux-2.6.32.41/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41052 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41053 #endif
41054 struct ncp_entry_info finfo;
41055
41056 + pax_track_stack();
41057 +
41058 data.wdog_pid = NULL;
41059 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41060 if (!server)
41061 diff -urNp linux-2.6.32.41/fs/nfs/inode.c linux-2.6.32.41/fs/nfs/inode.c
41062 --- linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41063 +++ linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41064 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41065 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41066 }
41067
41068 -static atomic_long_t nfs_attr_generation_counter;
41069 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41070
41071 static unsigned long nfs_read_attr_generation_counter(void)
41072 {
41073 - return atomic_long_read(&nfs_attr_generation_counter);
41074 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41075 }
41076
41077 unsigned long nfs_inc_attr_generation_counter(void)
41078 {
41079 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41080 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41081 }
41082
41083 void nfs_fattr_init(struct nfs_fattr *fattr)
41084 diff -urNp linux-2.6.32.41/fs/nfsd/lockd.c linux-2.6.32.41/fs/nfsd/lockd.c
41085 --- linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41086 +++ linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41087 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41088 fput(filp);
41089 }
41090
41091 -static struct nlmsvc_binding nfsd_nlm_ops = {
41092 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41093 .fopen = nlm_fopen, /* open file for locking */
41094 .fclose = nlm_fclose, /* close file */
41095 };
41096 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4state.c linux-2.6.32.41/fs/nfsd/nfs4state.c
41097 --- linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41098 +++ linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41099 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41100 unsigned int cmd;
41101 int err;
41102
41103 + pax_track_stack();
41104 +
41105 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41106 (long long) lock->lk_offset,
41107 (long long) lock->lk_length);
41108 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4xdr.c linux-2.6.32.41/fs/nfsd/nfs4xdr.c
41109 --- linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41110 +++ linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41111 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41112 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41113 u32 minorversion = resp->cstate.minorversion;
41114
41115 + pax_track_stack();
41116 +
41117 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41118 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41119 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41120 diff -urNp linux-2.6.32.41/fs/nfsd/vfs.c linux-2.6.32.41/fs/nfsd/vfs.c
41121 --- linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41122 +++ linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41123 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41124 } else {
41125 oldfs = get_fs();
41126 set_fs(KERNEL_DS);
41127 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41128 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41129 set_fs(oldfs);
41130 }
41131
41132 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41133
41134 /* Write the data. */
41135 oldfs = get_fs(); set_fs(KERNEL_DS);
41136 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41137 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41138 set_fs(oldfs);
41139 if (host_err < 0)
41140 goto out_nfserr;
41141 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41142 */
41143
41144 oldfs = get_fs(); set_fs(KERNEL_DS);
41145 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41146 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41147 set_fs(oldfs);
41148
41149 if (host_err < 0)
41150 diff -urNp linux-2.6.32.41/fs/nilfs2/ioctl.c linux-2.6.32.41/fs/nilfs2/ioctl.c
41151 --- linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41152 +++ linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41153 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41154 unsigned int cmd, void __user *argp)
41155 {
41156 struct nilfs_argv argv[5];
41157 - const static size_t argsz[5] = {
41158 + static const size_t argsz[5] = {
41159 sizeof(struct nilfs_vdesc),
41160 sizeof(struct nilfs_period),
41161 sizeof(__u64),
41162 diff -urNp linux-2.6.32.41/fs/notify/dnotify/dnotify.c linux-2.6.32.41/fs/notify/dnotify/dnotify.c
41163 --- linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41164 +++ linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41165 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41166 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41167 }
41168
41169 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41170 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41171 .handle_event = dnotify_handle_event,
41172 .should_send_event = dnotify_should_send_event,
41173 .free_group_priv = NULL,
41174 diff -urNp linux-2.6.32.41/fs/notify/notification.c linux-2.6.32.41/fs/notify/notification.c
41175 --- linux-2.6.32.41/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41176 +++ linux-2.6.32.41/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41177 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41178 * get set to 0 so it will never get 'freed'
41179 */
41180 static struct fsnotify_event q_overflow_event;
41181 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41182 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41183
41184 /**
41185 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41186 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41187 */
41188 u32 fsnotify_get_cookie(void)
41189 {
41190 - return atomic_inc_return(&fsnotify_sync_cookie);
41191 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41192 }
41193 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41194
41195 diff -urNp linux-2.6.32.41/fs/ntfs/dir.c linux-2.6.32.41/fs/ntfs/dir.c
41196 --- linux-2.6.32.41/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41197 +++ linux-2.6.32.41/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41198 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41199 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41200 ~(s64)(ndir->itype.index.block_size - 1)));
41201 /* Bounds checks. */
41202 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41203 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41204 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41205 "inode 0x%lx or driver bug.", vdir->i_ino);
41206 goto err_out;
41207 diff -urNp linux-2.6.32.41/fs/ntfs/file.c linux-2.6.32.41/fs/ntfs/file.c
41208 --- linux-2.6.32.41/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41209 +++ linux-2.6.32.41/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41210 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41211 #endif /* NTFS_RW */
41212 };
41213
41214 -const struct file_operations ntfs_empty_file_ops = {};
41215 +const struct file_operations ntfs_empty_file_ops __read_only;
41216
41217 -const struct inode_operations ntfs_empty_inode_ops = {};
41218 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41219 diff -urNp linux-2.6.32.41/fs/ocfs2/cluster/masklog.c linux-2.6.32.41/fs/ocfs2/cluster/masklog.c
41220 --- linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41221 +++ linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41222 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41223 return mlog_mask_store(mlog_attr->mask, buf, count);
41224 }
41225
41226 -static struct sysfs_ops mlog_attr_ops = {
41227 +static const struct sysfs_ops mlog_attr_ops = {
41228 .show = mlog_show,
41229 .store = mlog_store,
41230 };
41231 diff -urNp linux-2.6.32.41/fs/ocfs2/localalloc.c linux-2.6.32.41/fs/ocfs2/localalloc.c
41232 --- linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41233 +++ linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41234 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41235 goto bail;
41236 }
41237
41238 - atomic_inc(&osb->alloc_stats.moves);
41239 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41240
41241 status = 0;
41242 bail:
41243 diff -urNp linux-2.6.32.41/fs/ocfs2/namei.c linux-2.6.32.41/fs/ocfs2/namei.c
41244 --- linux-2.6.32.41/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41245 +++ linux-2.6.32.41/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41246 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41247 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41248 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41249
41250 + pax_track_stack();
41251 +
41252 /* At some point it might be nice to break this function up a
41253 * bit. */
41254
41255 diff -urNp linux-2.6.32.41/fs/ocfs2/ocfs2.h linux-2.6.32.41/fs/ocfs2/ocfs2.h
41256 --- linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41257 +++ linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41258 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41259
41260 struct ocfs2_alloc_stats
41261 {
41262 - atomic_t moves;
41263 - atomic_t local_data;
41264 - atomic_t bitmap_data;
41265 - atomic_t bg_allocs;
41266 - atomic_t bg_extends;
41267 + atomic_unchecked_t moves;
41268 + atomic_unchecked_t local_data;
41269 + atomic_unchecked_t bitmap_data;
41270 + atomic_unchecked_t bg_allocs;
41271 + atomic_unchecked_t bg_extends;
41272 };
41273
41274 enum ocfs2_local_alloc_state
41275 diff -urNp linux-2.6.32.41/fs/ocfs2/suballoc.c linux-2.6.32.41/fs/ocfs2/suballoc.c
41276 --- linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41277 +++ linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41278 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41279 mlog_errno(status);
41280 goto bail;
41281 }
41282 - atomic_inc(&osb->alloc_stats.bg_extends);
41283 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41284
41285 /* You should never ask for this much metadata */
41286 BUG_ON(bits_wanted >
41287 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41288 mlog_errno(status);
41289 goto bail;
41290 }
41291 - atomic_inc(&osb->alloc_stats.bg_allocs);
41292 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41293
41294 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41295 ac->ac_bits_given += (*num_bits);
41296 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41297 mlog_errno(status);
41298 goto bail;
41299 }
41300 - atomic_inc(&osb->alloc_stats.bg_allocs);
41301 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41302
41303 BUG_ON(num_bits != 1);
41304
41305 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41306 cluster_start,
41307 num_clusters);
41308 if (!status)
41309 - atomic_inc(&osb->alloc_stats.local_data);
41310 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41311 } else {
41312 if (min_clusters > (osb->bitmap_cpg - 1)) {
41313 /* The only paths asking for contiguousness
41314 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41315 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41316 bg_blkno,
41317 bg_bit_off);
41318 - atomic_inc(&osb->alloc_stats.bitmap_data);
41319 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41320 }
41321 }
41322 if (status < 0) {
41323 diff -urNp linux-2.6.32.41/fs/ocfs2/super.c linux-2.6.32.41/fs/ocfs2/super.c
41324 --- linux-2.6.32.41/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41325 +++ linux-2.6.32.41/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41326 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41327 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41328 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41329 "Stats",
41330 - atomic_read(&osb->alloc_stats.bitmap_data),
41331 - atomic_read(&osb->alloc_stats.local_data),
41332 - atomic_read(&osb->alloc_stats.bg_allocs),
41333 - atomic_read(&osb->alloc_stats.moves),
41334 - atomic_read(&osb->alloc_stats.bg_extends));
41335 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41336 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41337 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41338 + atomic_read_unchecked(&osb->alloc_stats.moves),
41339 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41340
41341 out += snprintf(buf + out, len - out,
41342 "%10s => State: %u Descriptor: %llu Size: %u bits "
41343 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41344 spin_lock_init(&osb->osb_xattr_lock);
41345 ocfs2_init_inode_steal_slot(osb);
41346
41347 - atomic_set(&osb->alloc_stats.moves, 0);
41348 - atomic_set(&osb->alloc_stats.local_data, 0);
41349 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41350 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41351 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41352 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41353 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41354 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41355 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41356 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41357
41358 /* Copy the blockcheck stats from the superblock probe */
41359 osb->osb_ecc_stats = *stats;
41360 diff -urNp linux-2.6.32.41/fs/open.c linux-2.6.32.41/fs/open.c
41361 --- linux-2.6.32.41/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41362 +++ linux-2.6.32.41/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41363 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41364 error = locks_verify_truncate(inode, NULL, length);
41365 if (!error)
41366 error = security_path_truncate(&path, length, 0);
41367 +
41368 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41369 + error = -EACCES;
41370 +
41371 if (!error) {
41372 vfs_dq_init(inode);
41373 error = do_truncate(path.dentry, length, 0, NULL);
41374 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41375 if (__mnt_is_readonly(path.mnt))
41376 res = -EROFS;
41377
41378 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41379 + res = -EACCES;
41380 +
41381 out_path_release:
41382 path_put(&path);
41383 out:
41384 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41385 if (error)
41386 goto dput_and_out;
41387
41388 + gr_log_chdir(path.dentry, path.mnt);
41389 +
41390 set_fs_pwd(current->fs, &path);
41391
41392 dput_and_out:
41393 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41394 goto out_putf;
41395
41396 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41397 +
41398 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41399 + error = -EPERM;
41400 +
41401 + if (!error)
41402 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41403 +
41404 if (!error)
41405 set_fs_pwd(current->fs, &file->f_path);
41406 out_putf:
41407 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41408 if (!capable(CAP_SYS_CHROOT))
41409 goto dput_and_out;
41410
41411 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41412 + goto dput_and_out;
41413 +
41414 + if (gr_handle_chroot_caps(&path)) {
41415 + error = -ENOMEM;
41416 + goto dput_and_out;
41417 + }
41418 +
41419 set_fs_root(current->fs, &path);
41420 +
41421 + gr_handle_chroot_chdir(&path);
41422 +
41423 error = 0;
41424 dput_and_out:
41425 path_put(&path);
41426 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41427 err = mnt_want_write_file(file);
41428 if (err)
41429 goto out_putf;
41430 +
41431 mutex_lock(&inode->i_mutex);
41432 +
41433 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41434 + err = -EACCES;
41435 + goto out_unlock;
41436 + }
41437 +
41438 if (mode == (mode_t) -1)
41439 mode = inode->i_mode;
41440 +
41441 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41442 + err = -EPERM;
41443 + goto out_unlock;
41444 + }
41445 +
41446 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41447 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41448 err = notify_change(dentry, &newattrs);
41449 +
41450 +out_unlock:
41451 mutex_unlock(&inode->i_mutex);
41452 mnt_drop_write(file->f_path.mnt);
41453 out_putf:
41454 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41455 error = mnt_want_write(path.mnt);
41456 if (error)
41457 goto dput_and_out;
41458 +
41459 mutex_lock(&inode->i_mutex);
41460 +
41461 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41462 + error = -EACCES;
41463 + goto out_unlock;
41464 + }
41465 +
41466 if (mode == (mode_t) -1)
41467 mode = inode->i_mode;
41468 +
41469 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41470 + error = -EACCES;
41471 + goto out_unlock;
41472 + }
41473 +
41474 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41475 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41476 error = notify_change(path.dentry, &newattrs);
41477 +
41478 +out_unlock:
41479 mutex_unlock(&inode->i_mutex);
41480 mnt_drop_write(path.mnt);
41481 dput_and_out:
41482 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41483 return sys_fchmodat(AT_FDCWD, filename, mode);
41484 }
41485
41486 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41487 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41488 {
41489 struct inode *inode = dentry->d_inode;
41490 int error;
41491 struct iattr newattrs;
41492
41493 + if (!gr_acl_handle_chown(dentry, mnt))
41494 + return -EACCES;
41495 +
41496 newattrs.ia_valid = ATTR_CTIME;
41497 if (user != (uid_t) -1) {
41498 newattrs.ia_valid |= ATTR_UID;
41499 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41500 error = mnt_want_write(path.mnt);
41501 if (error)
41502 goto out_release;
41503 - error = chown_common(path.dentry, user, group);
41504 + error = chown_common(path.dentry, user, group, path.mnt);
41505 mnt_drop_write(path.mnt);
41506 out_release:
41507 path_put(&path);
41508 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41509 error = mnt_want_write(path.mnt);
41510 if (error)
41511 goto out_release;
41512 - error = chown_common(path.dentry, user, group);
41513 + error = chown_common(path.dentry, user, group, path.mnt);
41514 mnt_drop_write(path.mnt);
41515 out_release:
41516 path_put(&path);
41517 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41518 error = mnt_want_write(path.mnt);
41519 if (error)
41520 goto out_release;
41521 - error = chown_common(path.dentry, user, group);
41522 + error = chown_common(path.dentry, user, group, path.mnt);
41523 mnt_drop_write(path.mnt);
41524 out_release:
41525 path_put(&path);
41526 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41527 goto out_fput;
41528 dentry = file->f_path.dentry;
41529 audit_inode(NULL, dentry);
41530 - error = chown_common(dentry, user, group);
41531 + error = chown_common(dentry, user, group, file->f_path.mnt);
41532 mnt_drop_write(file->f_path.mnt);
41533 out_fput:
41534 fput(file);
41535 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41536 if (!IS_ERR(tmp)) {
41537 fd = get_unused_fd_flags(flags);
41538 if (fd >= 0) {
41539 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41540 + struct file *f;
41541 + /* don't allow to be set by userland */
41542 + flags &= ~FMODE_GREXEC;
41543 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41544 if (IS_ERR(f)) {
41545 put_unused_fd(fd);
41546 fd = PTR_ERR(f);
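
Throughout fs/open.c the pattern is the same: once the existing permission logic succeeds, a gr_* hook gets a chance to veto or log the operation before any state changes; chown_common() also gains the vfsmount so the hook can see both the dentry and the mount it was reached through. A rough userspace sketch of that layering, with a placeholder standing in for the hook:

/* Sketch: an extra policy veto layered after the normal checks. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool policy_allows_truncate(const char *path)
{
	return path[0] != '\0';		/* stand-in for gr_acl_handle_truncate() */
}

static int do_truncate_checked(const char *path, long length)
{
	int error = 0;			/* assume DAC/LSM checks already passed */

	if (!error && !policy_allows_truncate(path))
		error = -EACCES;	/* extra veto on top of the usual checks */

	if (!error)
		printf("truncate %s to %ld\n", path, length);
	return error;
}

int main(void)
{
	printf("rc=%d\n", do_truncate_checked("/tmp/f", 0));
	printf("rc=%d\n", do_truncate_checked("", 0));
	return 0;
}
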
41547 diff -urNp linux-2.6.32.41/fs/partitions/ldm.c linux-2.6.32.41/fs/partitions/ldm.c
41548 --- linux-2.6.32.41/fs/partitions/ldm.c 2011-05-10 22:12:01.000000000 -0400
41549 +++ linux-2.6.32.41/fs/partitions/ldm.c 2011-04-18 19:31:12.000000000 -0400
41550 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41551 ldm_error ("A VBLK claims to have %d parts.", num);
41552 return false;
41553 }
41554 +
41555 if (rec >= num) {
41556 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41557 return false;
41558 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41559 goto found;
41560 }
41561
41562 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41563 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41564 if (!f) {
41565 ldm_crit ("Out of memory.");
41566 return false;
41567 diff -urNp linux-2.6.32.41/fs/partitions/mac.c linux-2.6.32.41/fs/partitions/mac.c
41568 --- linux-2.6.32.41/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41569 +++ linux-2.6.32.41/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41570 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41571 return 0; /* not a MacOS disk */
41572 }
41573 blocks_in_map = be32_to_cpu(part->map_count);
41574 + printk(" [mac]");
41575 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41576 put_dev_sector(sect);
41577 return 0;
41578 }
41579 - printk(" [mac]");
41580 for (slot = 1; slot <= blocks_in_map; ++slot) {
41581 int pos = slot * secsize;
41582 put_dev_sector(sect);
41583 diff -urNp linux-2.6.32.41/fs/pipe.c linux-2.6.32.41/fs/pipe.c
41584 --- linux-2.6.32.41/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41585 +++ linux-2.6.32.41/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41586 @@ -401,9 +401,9 @@ redo:
41587 }
41588 if (bufs) /* More to do? */
41589 continue;
41590 - if (!pipe->writers)
41591 + if (!atomic_read(&pipe->writers))
41592 break;
41593 - if (!pipe->waiting_writers) {
41594 + if (!atomic_read(&pipe->waiting_writers)) {
41595 /* syscall merging: Usually we must not sleep
41596 * if O_NONBLOCK is set, or if we got some data.
41597 * But if a writer sleeps in kernel space, then
41598 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41599 mutex_lock(&inode->i_mutex);
41600 pipe = inode->i_pipe;
41601
41602 - if (!pipe->readers) {
41603 + if (!atomic_read(&pipe->readers)) {
41604 send_sig(SIGPIPE, current, 0);
41605 ret = -EPIPE;
41606 goto out;
41607 @@ -511,7 +511,7 @@ redo1:
41608 for (;;) {
41609 int bufs;
41610
41611 - if (!pipe->readers) {
41612 + if (!atomic_read(&pipe->readers)) {
41613 send_sig(SIGPIPE, current, 0);
41614 if (!ret)
41615 ret = -EPIPE;
41616 @@ -597,9 +597,9 @@ redo2:
41617 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41618 do_wakeup = 0;
41619 }
41620 - pipe->waiting_writers++;
41621 + atomic_inc(&pipe->waiting_writers);
41622 pipe_wait(pipe);
41623 - pipe->waiting_writers--;
41624 + atomic_dec(&pipe->waiting_writers);
41625 }
41626 out:
41627 mutex_unlock(&inode->i_mutex);
41628 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41629 mask = 0;
41630 if (filp->f_mode & FMODE_READ) {
41631 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41632 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41633 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41634 mask |= POLLHUP;
41635 }
41636
41637 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41638 * Most Unices do not set POLLERR for FIFOs but on Linux they
41639 * behave exactly like pipes for poll().
41640 */
41641 - if (!pipe->readers)
41642 + if (!atomic_read(&pipe->readers))
41643 mask |= POLLERR;
41644 }
41645
41646 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41647
41648 mutex_lock(&inode->i_mutex);
41649 pipe = inode->i_pipe;
41650 - pipe->readers -= decr;
41651 - pipe->writers -= decw;
41652 + atomic_sub(decr, &pipe->readers);
41653 + atomic_sub(decw, &pipe->writers);
41654
41655 - if (!pipe->readers && !pipe->writers) {
41656 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41657 free_pipe_info(inode);
41658 } else {
41659 wake_up_interruptible_sync(&pipe->wait);
41660 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41661
41662 if (inode->i_pipe) {
41663 ret = 0;
41664 - inode->i_pipe->readers++;
41665 + atomic_inc(&inode->i_pipe->readers);
41666 }
41667
41668 mutex_unlock(&inode->i_mutex);
41669 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41670
41671 if (inode->i_pipe) {
41672 ret = 0;
41673 - inode->i_pipe->writers++;
41674 + atomic_inc(&inode->i_pipe->writers);
41675 }
41676
41677 mutex_unlock(&inode->i_mutex);
41678 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41679 if (inode->i_pipe) {
41680 ret = 0;
41681 if (filp->f_mode & FMODE_READ)
41682 - inode->i_pipe->readers++;
41683 + atomic_inc(&inode->i_pipe->readers);
41684 if (filp->f_mode & FMODE_WRITE)
41685 - inode->i_pipe->writers++;
41686 + atomic_inc(&inode->i_pipe->writers);
41687 }
41688
41689 mutex_unlock(&inode->i_mutex);
41690 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41691 inode->i_pipe = NULL;
41692 }
41693
41694 -static struct vfsmount *pipe_mnt __read_mostly;
41695 +struct vfsmount *pipe_mnt __read_mostly;
41696 static int pipefs_delete_dentry(struct dentry *dentry)
41697 {
41698 /*
41699 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41700 goto fail_iput;
41701 inode->i_pipe = pipe;
41702
41703 - pipe->readers = pipe->writers = 1;
41704 + atomic_set(&pipe->readers, 1);
41705 + atomic_set(&pipe->writers, 1);
41706 inode->i_fop = &rdwr_pipefifo_fops;
41707
41708 /*
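
The fs/pipe.c hunks convert the readers/writers/waiting_writers fields to atomic counters and read them via atomic_read(); the open/release bookkeeping is otherwise unchanged. A compact sketch of that open/release pattern using C11 atomics in place of the kernel's atomic_t (names and layout are illustrative):

/* Sketch: the pipe is torn down once both counters reach zero. */
#include <stdatomic.h>
#include <stdio.h>

struct pipe_counts {
	atomic_int readers;
	atomic_int writers;
};

static void open_reader(struct pipe_counts *p) { atomic_fetch_add(&p->readers, 1); }
static void open_writer(struct pipe_counts *p) { atomic_fetch_add(&p->writers, 1); }

static int release(struct pipe_counts *p, int decr, int decw)
{
	int r = atomic_fetch_sub(&p->readers, decr) - decr;
	int w = atomic_fetch_sub(&p->writers, decw) - decw;
	return r == 0 && w == 0;	/* caller frees the pipe when this is true */
}

int main(void)
{
	struct pipe_counts p = { 0, 0 };

	open_reader(&p);
	open_writer(&p);
	printf("free now? %d\n", release(&p, 1, 0));	/* 0: writer still open */
	printf("free now? %d\n", release(&p, 0, 1));	/* 1: last user gone */
	return 0;
}
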
41709 diff -urNp linux-2.6.32.41/fs/proc/array.c linux-2.6.32.41/fs/proc/array.c
41710 --- linux-2.6.32.41/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41711 +++ linux-2.6.32.41/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41712 @@ -60,6 +60,7 @@
41713 #include <linux/tty.h>
41714 #include <linux/string.h>
41715 #include <linux/mman.h>
41716 +#include <linux/grsecurity.h>
41717 #include <linux/proc_fs.h>
41718 #include <linux/ioport.h>
41719 #include <linux/uaccess.h>
41720 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41721 p->nivcsw);
41722 }
41723
41724 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41725 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41726 +{
41727 + if (p->mm)
41728 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41729 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41730 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41731 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41732 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41733 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41734 + else
41735 + seq_printf(m, "PaX:\t-----\n");
41736 +}
41737 +#endif
41738 +
41739 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41740 struct pid *pid, struct task_struct *task)
41741 {
41742 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41743 task_cap(m, task);
41744 cpuset_task_status_allowed(m, task);
41745 task_context_switch_counts(m, task);
41746 +
41747 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41748 + task_pax(m, task);
41749 +#endif
41750 +
41751 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41752 + task_grsec_rbac(m, task);
41753 +#endif
41754 +
41755 return 0;
41756 }
41757
41758 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41759 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41760 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41761 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41762 +#endif
41763 +
41764 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41765 struct pid *pid, struct task_struct *task, int whole)
41766 {
41767 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41768 cputime_t cutime, cstime, utime, stime;
41769 cputime_t cgtime, gtime;
41770 unsigned long rsslim = 0;
41771 - char tcomm[sizeof(task->comm)];
41772 + char tcomm[sizeof(task->comm)] = { 0 };
41773 unsigned long flags;
41774
41775 + pax_track_stack();
41776 +
41777 state = *get_task_state(task);
41778 vsize = eip = esp = 0;
41779 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41780 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41781 gtime = task_gtime(task);
41782 }
41783
41784 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41785 + if (PAX_RAND_FLAGS(mm)) {
41786 + eip = 0;
41787 + esp = 0;
41788 + wchan = 0;
41789 + }
41790 +#endif
41791 +#ifdef CONFIG_GRKERNSEC_HIDESYM
41792 + wchan = 0;
41793 + eip =0;
41794 + esp =0;
41795 +#endif
41796 +
41797 /* scale priority and nice values from timeslices to -20..20 */
41798 /* to make it look like a "normal" Unix priority/nice value */
41799 priority = task_prio(task);
41800 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41801 vsize,
41802 mm ? get_mm_rss(mm) : 0,
41803 rsslim,
41804 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41805 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41806 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41807 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41808 +#else
41809 mm ? (permitted ? mm->start_code : 1) : 0,
41810 mm ? (permitted ? mm->end_code : 1) : 0,
41811 (permitted && mm) ? mm->start_stack : 0,
41812 +#endif
41813 esp,
41814 eip,
41815 /* The signal information here is obsolete.
41816 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41817
41818 return 0;
41819 }
41820 +
41821 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41822 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41823 +{
41824 + u32 curr_ip = 0;
41825 + unsigned long flags;
41826 +
41827 + if (lock_task_sighand(task, &flags)) {
41828 + curr_ip = task->signal->curr_ip;
41829 + unlock_task_sighand(task, &flags);
41830 + }
41831 +
41832 + return sprintf(buffer, "%pI4\n", &curr_ip);
41833 +}
41834 +#endif
41835 diff -urNp linux-2.6.32.41/fs/proc/base.c linux-2.6.32.41/fs/proc/base.c
41836 --- linux-2.6.32.41/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41837 +++ linux-2.6.32.41/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
41838 @@ -102,6 +102,22 @@ struct pid_entry {
41839 union proc_op op;
41840 };
41841
41842 +struct getdents_callback {
41843 + struct linux_dirent __user * current_dir;
41844 + struct linux_dirent __user * previous;
41845 + struct file * file;
41846 + int count;
41847 + int error;
41848 +};
41849 +
41850 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41851 + loff_t offset, u64 ino, unsigned int d_type)
41852 +{
41853 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
41854 + buf->error = -EINVAL;
41855 + return 0;
41856 +}
41857 +
41858 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41859 .name = (NAME), \
41860 .len = sizeof(NAME) - 1, \
41861 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41862 if (task == current)
41863 return 0;
41864
41865 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41866 + return -EPERM;
41867 +
41868 /*
41869 * If current is actively ptrace'ing, and would also be
41870 * permitted to freshly attach with ptrace now, permit it.
41871 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41872 if (!mm->arg_end)
41873 goto out_mm; /* Shh! No looking before we're done */
41874
41875 + if (gr_acl_handle_procpidmem(task))
41876 + goto out_mm;
41877 +
41878 len = mm->arg_end - mm->arg_start;
41879
41880 if (len > PAGE_SIZE)
41881 @@ -287,12 +309,28 @@ out:
41882 return res;
41883 }
41884
41885 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41886 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41887 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41888 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41889 +#endif
41890 +
41891 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41892 {
41893 int res = 0;
41894 struct mm_struct *mm = get_task_mm(task);
41895 if (mm) {
41896 unsigned int nwords = 0;
41897 +
41898 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41899 + /* allow if we're currently ptracing this task */
41900 + if (PAX_RAND_FLAGS(mm) &&
41901 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
41902 + mmput(mm);
41903 + return res;
41904 + }
41905 +#endif
41906 +
41907 do {
41908 nwords += 2;
41909 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
41910 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
41911 }
41912
41913
41914 -#ifdef CONFIG_KALLSYMS
41915 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41916 /*
41917 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
41918 * Returns the resolved symbol. If that fails, simply return the address.
41919 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
41920 }
41921 #endif /* CONFIG_KALLSYMS */
41922
41923 -#ifdef CONFIG_STACKTRACE
41924 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41925
41926 #define MAX_STACK_TRACE_DEPTH 64
41927
41928 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
41929 return count;
41930 }
41931
41932 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
41933 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41934 static int proc_pid_syscall(struct task_struct *task, char *buffer)
41935 {
41936 long nr;
41937 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
41938 /************************************************************************/
41939
41940 /* permission checks */
41941 -static int proc_fd_access_allowed(struct inode *inode)
41942 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
41943 {
41944 struct task_struct *task;
41945 int allowed = 0;
41946 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
41947 */
41948 task = get_proc_task(inode);
41949 if (task) {
41950 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41951 + if (log)
41952 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
41953 + else
41954 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41955 put_task_struct(task);
41956 }
41957 return allowed;
41958 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
41959 if (!task)
41960 goto out_no_task;
41961
41962 + if (gr_acl_handle_procpidmem(task))
41963 + goto out;
41964 +
41965 if (!ptrace_may_access(task, PTRACE_MODE_READ))
41966 goto out;
41967
41968 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
41969 path_put(&nd->path);
41970
41971 /* Are we allowed to snoop on the tasks file descriptors? */
41972 - if (!proc_fd_access_allowed(inode))
41973 + if (!proc_fd_access_allowed(inode,0))
41974 goto out;
41975
41976 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
41977 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
41978 struct path path;
41979
41980 /* Are we allowed to snoop on the tasks file descriptors? */
41981 - if (!proc_fd_access_allowed(inode))
41982 - goto out;
41983 + /* logging this is needed for learning on chromium to work properly,
41984 + but we don't want to flood the logs from 'ps' which does a readlink
41985 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
41986 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
41987 + */
41988 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
41989 + if (!proc_fd_access_allowed(inode,0))
41990 + goto out;
41991 + } else {
41992 + if (!proc_fd_access_allowed(inode,1))
41993 + goto out;
41994 + }
41995
41996 error = PROC_I(inode)->op.proc_get_link(inode, &path);
41997 if (error)
41998 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
41999 rcu_read_lock();
42000 cred = __task_cred(task);
42001 inode->i_uid = cred->euid;
42002 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42003 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42004 +#else
42005 inode->i_gid = cred->egid;
42006 +#endif
42007 rcu_read_unlock();
42008 }
42009 security_task_to_inode(task, inode);
42010 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42011 struct inode *inode = dentry->d_inode;
42012 struct task_struct *task;
42013 const struct cred *cred;
42014 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42015 + const struct cred *tmpcred = current_cred();
42016 +#endif
42017
42018 generic_fillattr(inode, stat);
42019
42020 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42021 stat->uid = 0;
42022 stat->gid = 0;
42023 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42024 +
42025 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42026 + rcu_read_unlock();
42027 + return -ENOENT;
42028 + }
42029 +
42030 if (task) {
42031 + cred = __task_cred(task);
42032 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42033 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42034 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42035 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42036 +#endif
42037 + ) {
42038 +#endif
42039 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42040 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42041 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42042 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42043 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42044 +#endif
42045 task_dumpable(task)) {
42046 - cred = __task_cred(task);
42047 stat->uid = cred->euid;
42048 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42049 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42050 +#else
42051 stat->gid = cred->egid;
42052 +#endif
42053 }
42054 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42055 + } else {
42056 + rcu_read_unlock();
42057 + return -ENOENT;
42058 + }
42059 +#endif
42060 }
42061 rcu_read_unlock();
42062 return 0;
42063 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42064
42065 if (task) {
42066 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42067 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42068 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42069 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42070 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42071 +#endif
42072 task_dumpable(task)) {
42073 rcu_read_lock();
42074 cred = __task_cred(task);
42075 inode->i_uid = cred->euid;
42076 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42077 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42078 +#else
42079 inode->i_gid = cred->egid;
42080 +#endif
42081 rcu_read_unlock();
42082 } else {
42083 inode->i_uid = 0;
42084 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42085 int fd = proc_fd(inode);
42086
42087 if (task) {
42088 - files = get_files_struct(task);
42089 + if (!gr_acl_handle_procpidmem(task))
42090 + files = get_files_struct(task);
42091 put_task_struct(task);
42092 }
42093 if (files) {
42094 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42095 static int proc_fd_permission(struct inode *inode, int mask)
42096 {
42097 int rv;
42098 + struct task_struct *task;
42099
42100 rv = generic_permission(inode, mask, NULL);
42101 - if (rv == 0)
42102 - return 0;
42103 +
42104 if (task_pid(current) == proc_pid(inode))
42105 rv = 0;
42106 +
42107 + task = get_proc_task(inode);
42108 + if (task == NULL)
42109 + return rv;
42110 +
42111 + if (gr_acl_handle_procpidmem(task))
42112 + rv = -EACCES;
42113 +
42114 + put_task_struct(task);
42115 +
42116 return rv;
42117 }
42118
42119 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42120 if (!task)
42121 goto out_no_task;
42122
42123 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42124 + goto out;
42125 +
42126 /*
42127 * Yes, it does not scale. And it should not. Don't add
42128 * new entries into /proc/<tgid>/ without very good reasons.
42129 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42130 if (!task)
42131 goto out_no_task;
42132
42133 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42134 + goto out;
42135 +
42136 ret = 0;
42137 i = filp->f_pos;
42138 switch (i) {
42139 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42140 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42141 void *cookie)
42142 {
42143 - char *s = nd_get_link(nd);
42144 + const char *s = nd_get_link(nd);
42145 if (!IS_ERR(s))
42146 __putname(s);
42147 }
42148 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42149 #ifdef CONFIG_SCHED_DEBUG
42150 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42151 #endif
42152 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42153 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42154 INF("syscall", S_IRUSR, proc_pid_syscall),
42155 #endif
42156 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42157 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42158 #ifdef CONFIG_SECURITY
42159 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42160 #endif
42161 -#ifdef CONFIG_KALLSYMS
42162 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42163 INF("wchan", S_IRUGO, proc_pid_wchan),
42164 #endif
42165 -#ifdef CONFIG_STACKTRACE
42166 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42167 ONE("stack", S_IRUSR, proc_pid_stack),
42168 #endif
42169 #ifdef CONFIG_SCHEDSTATS
42170 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42171 #ifdef CONFIG_TASK_IO_ACCOUNTING
42172 INF("io", S_IRUGO, proc_tgid_io_accounting),
42173 #endif
42174 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42175 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42176 +#endif
42177 };
42178
42179 static int proc_tgid_base_readdir(struct file * filp,
42180 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42181 if (!inode)
42182 goto out;
42183
42184 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42185 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42186 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42187 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42188 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42189 +#else
42190 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42191 +#endif
42192 inode->i_op = &proc_tgid_base_inode_operations;
42193 inode->i_fop = &proc_tgid_base_operations;
42194 inode->i_flags|=S_IMMUTABLE;
42195 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42196 if (!task)
42197 goto out;
42198
42199 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42200 + goto out_put_task;
42201 +
42202 result = proc_pid_instantiate(dir, dentry, task, NULL);
42203 +out_put_task:
42204 put_task_struct(task);
42205 out:
42206 return result;
42207 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42208 {
42209 unsigned int nr;
42210 struct task_struct *reaper;
42211 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42212 + const struct cred *tmpcred = current_cred();
42213 + const struct cred *itercred;
42214 +#endif
42215 + filldir_t __filldir = filldir;
42216 struct tgid_iter iter;
42217 struct pid_namespace *ns;
42218
42219 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42220 for (iter = next_tgid(ns, iter);
42221 iter.task;
42222 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42223 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42224 + rcu_read_lock();
42225 + itercred = __task_cred(iter.task);
42226 +#endif
42227 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42228 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42229 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42230 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42231 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42232 +#endif
42233 + )
42234 +#endif
42235 + )
42236 + __filldir = &gr_fake_filldir;
42237 + else
42238 + __filldir = filldir;
42239 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42240 + rcu_read_unlock();
42241 +#endif
42242 filp->f_pos = iter.tgid + TGID_OFFSET;
42243 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42244 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42245 put_task_struct(iter.task);
42246 goto out;
42247 }
42248 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42249 #ifdef CONFIG_SCHED_DEBUG
42250 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42251 #endif
42252 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42253 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42254 INF("syscall", S_IRUSR, proc_pid_syscall),
42255 #endif
42256 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42257 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42258 #ifdef CONFIG_SECURITY
42259 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42260 #endif
42261 -#ifdef CONFIG_KALLSYMS
42262 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42263 INF("wchan", S_IRUGO, proc_pid_wchan),
42264 #endif
42265 -#ifdef CONFIG_STACKTRACE
42266 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42267 ONE("stack", S_IRUSR, proc_pid_stack),
42268 #endif
42269 #ifdef CONFIG_SCHEDSTATS
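
The fs/proc/base.c changes gate /proc/<pid> visibility on the viewer: with GRKERNSEC_PROC_USER only root and the task's owner see an entry, while the USERGROUP variant additionally trusts members of a configured group. A small sketch of that rule as a pure function (the GID constant is a stand-in for CONFIG_GRKERNSEC_PROC_GID, not the real value):

/* Sketch: who is allowed to see another task's /proc entry. */
#include <stdbool.h>
#include <stdio.h>

#define PROC_GID 10	/* placeholder for CONFIG_GRKERNSEC_PROC_GID */

static bool pid_entry_visible(unsigned viewer_uid, unsigned viewer_gid,
			      unsigned task_uid)
{
	if (viewer_uid == 0)		/* root sees everything */
		return true;
	if (viewer_uid == task_uid)	/* owner sees own tasks */
		return true;
	return viewer_gid == PROC_GID;	/* USERGROUP variant: trusted group */
}

int main(void)
{
	printf("%d\n", pid_entry_visible(1000, 100, 1001));		/* 0: hidden */
	printf("%d\n", pid_entry_visible(1000, PROC_GID, 1001));	/* 1: visible */
	return 0;
}
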
42270 diff -urNp linux-2.6.32.41/fs/proc/cmdline.c linux-2.6.32.41/fs/proc/cmdline.c
42271 --- linux-2.6.32.41/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42272 +++ linux-2.6.32.41/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42273 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42274
42275 static int __init proc_cmdline_init(void)
42276 {
42277 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42278 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42279 +#else
42280 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42281 +#endif
42282 return 0;
42283 }
42284 module_init(proc_cmdline_init);
42285 diff -urNp linux-2.6.32.41/fs/proc/devices.c linux-2.6.32.41/fs/proc/devices.c
42286 --- linux-2.6.32.41/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42287 +++ linux-2.6.32.41/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42288 @@ -64,7 +64,11 @@ static const struct file_operations proc
42289
42290 static int __init proc_devices_init(void)
42291 {
42292 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42293 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42294 +#else
42295 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42296 +#endif
42297 return 0;
42298 }
42299 module_init(proc_devices_init);
42300 diff -urNp linux-2.6.32.41/fs/proc/inode.c linux-2.6.32.41/fs/proc/inode.c
42301 --- linux-2.6.32.41/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42302 +++ linux-2.6.32.41/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42303 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42304 if (de->mode) {
42305 inode->i_mode = de->mode;
42306 inode->i_uid = de->uid;
42307 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42308 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42309 +#else
42310 inode->i_gid = de->gid;
42311 +#endif
42312 }
42313 if (de->size)
42314 inode->i_size = de->size;
42315 diff -urNp linux-2.6.32.41/fs/proc/internal.h linux-2.6.32.41/fs/proc/internal.h
42316 --- linux-2.6.32.41/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42317 +++ linux-2.6.32.41/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42318 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42319 struct pid *pid, struct task_struct *task);
42320 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42321 struct pid *pid, struct task_struct *task);
42322 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42323 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42324 +#endif
42325 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42326
42327 extern const struct file_operations proc_maps_operations;
42328 diff -urNp linux-2.6.32.41/fs/proc/Kconfig linux-2.6.32.41/fs/proc/Kconfig
42329 --- linux-2.6.32.41/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42330 +++ linux-2.6.32.41/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42331 @@ -30,12 +30,12 @@ config PROC_FS
42332
42333 config PROC_KCORE
42334 bool "/proc/kcore support" if !ARM
42335 - depends on PROC_FS && MMU
42336 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42337
42338 config PROC_VMCORE
42339 bool "/proc/vmcore support (EXPERIMENTAL)"
42340 - depends on PROC_FS && CRASH_DUMP
42341 - default y
42342 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42343 + default n
42344 help
42345 Exports the dump image of crashed kernel in ELF format.
42346
42347 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42348 limited in memory.
42349
42350 config PROC_PAGE_MONITOR
42351 - default y
42352 - depends on PROC_FS && MMU
42353 + default n
42354 + depends on PROC_FS && MMU && !GRKERNSEC
42355 bool "Enable /proc page monitoring" if EMBEDDED
42356 help
42357 Various /proc files exist to monitor process memory utilization:
42358 diff -urNp linux-2.6.32.41/fs/proc/kcore.c linux-2.6.32.41/fs/proc/kcore.c
42359 --- linux-2.6.32.41/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42360 +++ linux-2.6.32.41/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42361 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42362 off_t offset = 0;
42363 struct kcore_list *m;
42364
42365 + pax_track_stack();
42366 +
42367 /* setup ELF header */
42368 elf = (struct elfhdr *) bufp;
42369 bufp += sizeof(struct elfhdr);
42370 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42371 * the addresses in the elf_phdr on our list.
42372 */
42373 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42374 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42375 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42376 + if (tsz > buflen)
42377 tsz = buflen;
42378 -
42379 +
42380 while (buflen) {
42381 struct kcore_list *m;
42382
42383 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42384 kfree(elf_buf);
42385 } else {
42386 if (kern_addr_valid(start)) {
42387 - unsigned long n;
42388 + char *elf_buf;
42389 + mm_segment_t oldfs;
42390
42391 - n = copy_to_user(buffer, (char *)start, tsz);
42392 - /*
42393 - * We cannot distingush between fault on source
42394 - * and fault on destination. When this happens
42395 - * we clear too and hope it will trigger the
42396 - * EFAULT again.
42397 - */
42398 - if (n) {
42399 - if (clear_user(buffer + tsz - n,
42400 - n))
42401 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42402 + if (!elf_buf)
42403 + return -ENOMEM;
42404 + oldfs = get_fs();
42405 + set_fs(KERNEL_DS);
42406 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42407 + set_fs(oldfs);
42408 + if (copy_to_user(buffer, elf_buf, tsz)) {
42409 + kfree(elf_buf);
42410 return -EFAULT;
42411 + }
42412 }
42413 + set_fs(oldfs);
42414 + kfree(elf_buf);
42415 } else {
42416 if (clear_user(buffer, tsz))
42417 return -EFAULT;
42418 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42419
42420 static int open_kcore(struct inode *inode, struct file *filp)
42421 {
42422 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42423 + return -EPERM;
42424 +#endif
42425 if (!capable(CAP_SYS_RAWIO))
42426 return -EPERM;
42427 if (kcore_need_update)
42428 diff -urNp linux-2.6.32.41/fs/proc/meminfo.c linux-2.6.32.41/fs/proc/meminfo.c
42429 --- linux-2.6.32.41/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42430 +++ linux-2.6.32.41/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42431 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42432 unsigned long pages[NR_LRU_LISTS];
42433 int lru;
42434
42435 + pax_track_stack();
42436 +
42437 /*
42438 * display in kilobytes.
42439 */
42440 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42441 vmi.used >> 10,
42442 vmi.largest_chunk >> 10
42443 #ifdef CONFIG_MEMORY_FAILURE
42444 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42445 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42446 #endif
42447 );
42448
42449 diff -urNp linux-2.6.32.41/fs/proc/nommu.c linux-2.6.32.41/fs/proc/nommu.c
42450 --- linux-2.6.32.41/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42451 +++ linux-2.6.32.41/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42452 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42453 if (len < 1)
42454 len = 1;
42455 seq_printf(m, "%*c", len, ' ');
42456 - seq_path(m, &file->f_path, "");
42457 + seq_path(m, &file->f_path, "\n\\");
42458 }
42459
42460 seq_putc(m, '\n');
42461 diff -urNp linux-2.6.32.41/fs/proc/proc_net.c linux-2.6.32.41/fs/proc/proc_net.c
42462 --- linux-2.6.32.41/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42463 +++ linux-2.6.32.41/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42464 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42465 struct task_struct *task;
42466 struct nsproxy *ns;
42467 struct net *net = NULL;
42468 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42469 + const struct cred *cred = current_cred();
42470 +#endif
42471 +
42472 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42473 + if (cred->fsuid)
42474 + return net;
42475 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42476 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42477 + return net;
42478 +#endif
42479
42480 rcu_read_lock();
42481 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42482 diff -urNp linux-2.6.32.41/fs/proc/proc_sysctl.c linux-2.6.32.41/fs/proc/proc_sysctl.c
42483 --- linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42484 +++ linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42485 @@ -7,6 +7,8 @@
42486 #include <linux/security.h>
42487 #include "internal.h"
42488
42489 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42490 +
42491 static const struct dentry_operations proc_sys_dentry_operations;
42492 static const struct file_operations proc_sys_file_operations;
42493 static const struct inode_operations proc_sys_inode_operations;
42494 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42495 if (!p)
42496 goto out;
42497
42498 + if (gr_handle_sysctl(p, MAY_EXEC))
42499 + goto out;
42500 +
42501 err = ERR_PTR(-ENOMEM);
42502 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42503 if (h)
42504 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42505 if (*pos < file->f_pos)
42506 continue;
42507
42508 + if (gr_handle_sysctl(table, 0))
42509 + continue;
42510 +
42511 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42512 if (res)
42513 return res;
42514 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42515 if (IS_ERR(head))
42516 return PTR_ERR(head);
42517
42518 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42519 + return -ENOENT;
42520 +
42521 generic_fillattr(inode, stat);
42522 if (table)
42523 stat->mode = (stat->mode & S_IFMT) | table->mode;
42524 diff -urNp linux-2.6.32.41/fs/proc/root.c linux-2.6.32.41/fs/proc/root.c
42525 --- linux-2.6.32.41/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42526 +++ linux-2.6.32.41/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42527 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42528 #ifdef CONFIG_PROC_DEVICETREE
42529 proc_device_tree_init();
42530 #endif
42531 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42532 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42533 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42534 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42535 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42536 +#endif
42537 +#else
42538 proc_mkdir("bus", NULL);
42539 +#endif
42540 proc_sys_init();
42541 }
42542
42543 diff -urNp linux-2.6.32.41/fs/proc/task_mmu.c linux-2.6.32.41/fs/proc/task_mmu.c
42544 --- linux-2.6.32.41/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42545 +++ linux-2.6.32.41/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42546 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42547 "VmStk:\t%8lu kB\n"
42548 "VmExe:\t%8lu kB\n"
42549 "VmLib:\t%8lu kB\n"
42550 - "VmPTE:\t%8lu kB\n",
42551 - hiwater_vm << (PAGE_SHIFT-10),
42552 + "VmPTE:\t%8lu kB\n"
42553 +
42554 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42555 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42556 +#endif
42557 +
42558 + ,hiwater_vm << (PAGE_SHIFT-10),
42559 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42560 mm->locked_vm << (PAGE_SHIFT-10),
42561 hiwater_rss << (PAGE_SHIFT-10),
42562 total_rss << (PAGE_SHIFT-10),
42563 data << (PAGE_SHIFT-10),
42564 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42565 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42566 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42567 +
42568 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42569 + , mm->context.user_cs_base, mm->context.user_cs_limit
42570 +#endif
42571 +
42572 + );
42573 }
42574
42575 unsigned long task_vsize(struct mm_struct *mm)
42576 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42577 struct proc_maps_private *priv = m->private;
42578 struct vm_area_struct *vma = v;
42579
42580 - vma_stop(priv, vma);
42581 + if (!IS_ERR(vma))
42582 + vma_stop(priv, vma);
42583 if (priv->task)
42584 put_task_struct(priv->task);
42585 }
42586 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42587 return ret;
42588 }
42589
42590 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42591 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42592 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42593 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42594 +#endif
42595 +
42596 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42597 {
42598 struct mm_struct *mm = vma->vm_mm;
42599 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42600 int flags = vma->vm_flags;
42601 unsigned long ino = 0;
42602 unsigned long long pgoff = 0;
42603 - unsigned long start;
42604 dev_t dev = 0;
42605 int len;
42606
42607 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42608 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42609 }
42610
42611 - /* We don't show the stack guard page in /proc/maps */
42612 - start = vma->vm_start;
42613 - if (vma->vm_flags & VM_GROWSDOWN)
42614 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42615 - start += PAGE_SIZE;
42616 -
42617 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42618 - start,
42619 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42620 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42621 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42622 +#else
42623 + vma->vm_start,
42624 vma->vm_end,
42625 +#endif
42626 flags & VM_READ ? 'r' : '-',
42627 flags & VM_WRITE ? 'w' : '-',
42628 flags & VM_EXEC ? 'x' : '-',
42629 flags & VM_MAYSHARE ? 's' : 'p',
42630 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42631 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42632 +#else
42633 pgoff,
42634 +#endif
42635 MAJOR(dev), MINOR(dev), ino, &len);
42636
42637 /*
42638 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42639 */
42640 if (file) {
42641 pad_len_spaces(m, len);
42642 - seq_path(m, &file->f_path, "\n");
42643 + seq_path(m, &file->f_path, "\n\\");
42644 } else {
42645 const char *name = arch_vma_name(vma);
42646 if (!name) {
42647 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42648 if (vma->vm_start <= mm->brk &&
42649 vma->vm_end >= mm->start_brk) {
42650 name = "[heap]";
42651 - } else if (vma->vm_start <= mm->start_stack &&
42652 - vma->vm_end >= mm->start_stack) {
42653 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42654 + (vma->vm_start <= mm->start_stack &&
42655 + vma->vm_end >= mm->start_stack)) {
42656 name = "[stack]";
42657 }
42658 } else {
42659 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42660 };
42661
42662 memset(&mss, 0, sizeof mss);
42663 - mss.vma = vma;
42664 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42665 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42666 +
42667 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42668 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42669 +#endif
42670 + mss.vma = vma;
42671 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42672 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42673 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42674 + }
42675 +#endif
42676
42677 show_map_vma(m, vma);
42678
42679 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42680 "Swap: %8lu kB\n"
42681 "KernelPageSize: %8lu kB\n"
42682 "MMUPageSize: %8lu kB\n",
42683 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42684 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42685 +#else
42686 (vma->vm_end - vma->vm_start) >> 10,
42687 +#endif
42688 mss.resident >> 10,
42689 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42690 mss.shared_clean >> 10,
42691 diff -urNp linux-2.6.32.41/fs/proc/task_nommu.c linux-2.6.32.41/fs/proc/task_nommu.c
42692 --- linux-2.6.32.41/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42693 +++ linux-2.6.32.41/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42694 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42695 else
42696 bytes += kobjsize(mm);
42697
42698 - if (current->fs && current->fs->users > 1)
42699 + if (current->fs && atomic_read(&current->fs->users) > 1)
42700 sbytes += kobjsize(current->fs);
42701 else
42702 bytes += kobjsize(current->fs);
42703 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42704 if (len < 1)
42705 len = 1;
42706 seq_printf(m, "%*c", len, ' ');
42707 - seq_path(m, &file->f_path, "");
42708 + seq_path(m, &file->f_path, "\n\\");
42709 }
42710
42711 seq_putc(m, '\n');
42712 diff -urNp linux-2.6.32.41/fs/readdir.c linux-2.6.32.41/fs/readdir.c
42713 --- linux-2.6.32.41/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42714 +++ linux-2.6.32.41/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42715 @@ -16,6 +16,7 @@
42716 #include <linux/security.h>
42717 #include <linux/syscalls.h>
42718 #include <linux/unistd.h>
42719 +#include <linux/namei.h>
42720
42721 #include <asm/uaccess.h>
42722
42723 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42724
42725 struct readdir_callback {
42726 struct old_linux_dirent __user * dirent;
42727 + struct file * file;
42728 int result;
42729 };
42730
42731 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42732 buf->result = -EOVERFLOW;
42733 return -EOVERFLOW;
42734 }
42735 +
42736 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42737 + return 0;
42738 +
42739 buf->result++;
42740 dirent = buf->dirent;
42741 if (!access_ok(VERIFY_WRITE, dirent,
42742 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42743
42744 buf.result = 0;
42745 buf.dirent = dirent;
42746 + buf.file = file;
42747
42748 error = vfs_readdir(file, fillonedir, &buf);
42749 if (buf.result)
42750 @@ -142,6 +149,7 @@ struct linux_dirent {
42751 struct getdents_callback {
42752 struct linux_dirent __user * current_dir;
42753 struct linux_dirent __user * previous;
42754 + struct file * file;
42755 int count;
42756 int error;
42757 };
42758 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42759 buf->error = -EOVERFLOW;
42760 return -EOVERFLOW;
42761 }
42762 +
42763 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42764 + return 0;
42765 +
42766 dirent = buf->previous;
42767 if (dirent) {
42768 if (__put_user(offset, &dirent->d_off))
42769 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42770 buf.previous = NULL;
42771 buf.count = count;
42772 buf.error = 0;
42773 + buf.file = file;
42774
42775 error = vfs_readdir(file, filldir, &buf);
42776 if (error >= 0)
42777 @@ -228,6 +241,7 @@ out:
42778 struct getdents_callback64 {
42779 struct linux_dirent64 __user * current_dir;
42780 struct linux_dirent64 __user * previous;
42781 + struct file *file;
42782 int count;
42783 int error;
42784 };
42785 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42786 buf->error = -EINVAL; /* only used if we fail.. */
42787 if (reclen > buf->count)
42788 return -EINVAL;
42789 +
42790 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42791 + return 0;
42792 +
42793 dirent = buf->previous;
42794 if (dirent) {
42795 if (__put_user(offset, &dirent->d_off))
42796 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42797
42798 buf.current_dir = dirent;
42799 buf.previous = NULL;
42800 + buf.file = file;
42801 buf.count = count;
42802 buf.error = 0;
42803
42804 diff -urNp linux-2.6.32.41/fs/reiserfs/dir.c linux-2.6.32.41/fs/reiserfs/dir.c
42805 --- linux-2.6.32.41/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42806 +++ linux-2.6.32.41/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42807 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42808 struct reiserfs_dir_entry de;
42809 int ret = 0;
42810
42811 + pax_track_stack();
42812 +
42813 reiserfs_write_lock(inode->i_sb);
42814
42815 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42816 diff -urNp linux-2.6.32.41/fs/reiserfs/do_balan.c linux-2.6.32.41/fs/reiserfs/do_balan.c
42817 --- linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42818 +++ linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42819 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42820 return;
42821 }
42822
42823 - atomic_inc(&(fs_generation(tb->tb_sb)));
42824 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42825 do_balance_starts(tb);
42826
42827 /* balance leaf returns 0 except if combining L R and S into
42828 diff -urNp linux-2.6.32.41/fs/reiserfs/item_ops.c linux-2.6.32.41/fs/reiserfs/item_ops.c
42829 --- linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42830 +++ linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42831 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42832 vi->vi_index, vi->vi_type, vi->vi_ih);
42833 }
42834
42835 -static struct item_operations stat_data_ops = {
42836 +static const struct item_operations stat_data_ops = {
42837 .bytes_number = sd_bytes_number,
42838 .decrement_key = sd_decrement_key,
42839 .is_left_mergeable = sd_is_left_mergeable,
42840 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42841 vi->vi_index, vi->vi_type, vi->vi_ih);
42842 }
42843
42844 -static struct item_operations direct_ops = {
42845 +static const struct item_operations direct_ops = {
42846 .bytes_number = direct_bytes_number,
42847 .decrement_key = direct_decrement_key,
42848 .is_left_mergeable = direct_is_left_mergeable,
42849 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42850 vi->vi_index, vi->vi_type, vi->vi_ih);
42851 }
42852
42853 -static struct item_operations indirect_ops = {
42854 +static const struct item_operations indirect_ops = {
42855 .bytes_number = indirect_bytes_number,
42856 .decrement_key = indirect_decrement_key,
42857 .is_left_mergeable = indirect_is_left_mergeable,
42858 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42859 printk("\n");
42860 }
42861
42862 -static struct item_operations direntry_ops = {
42863 +static const struct item_operations direntry_ops = {
42864 .bytes_number = direntry_bytes_number,
42865 .decrement_key = direntry_decrement_key,
42866 .is_left_mergeable = direntry_is_left_mergeable,
42867 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42868 "Invalid item type observed, run fsck ASAP");
42869 }
42870
42871 -static struct item_operations errcatch_ops = {
42872 +static const struct item_operations errcatch_ops = {
42873 errcatch_bytes_number,
42874 errcatch_decrement_key,
42875 errcatch_is_left_mergeable,
42876 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42877 #error Item types must use disk-format assigned values.
42878 #endif
42879
42880 -struct item_operations *item_ops[TYPE_ANY + 1] = {
42881 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42882 &stat_data_ops,
42883 &indirect_ops,
42884 &direct_ops,
42885 diff -urNp linux-2.6.32.41/fs/reiserfs/journal.c linux-2.6.32.41/fs/reiserfs/journal.c
42886 --- linux-2.6.32.41/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42887 +++ linux-2.6.32.41/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42888 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42889 struct buffer_head *bh;
42890 int i, j;
42891
42892 + pax_track_stack();
42893 +
42894 bh = __getblk(dev, block, bufsize);
42895 if (buffer_uptodate(bh))
42896 return (bh);
42897 diff -urNp linux-2.6.32.41/fs/reiserfs/namei.c linux-2.6.32.41/fs/reiserfs/namei.c
42898 --- linux-2.6.32.41/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
42899 +++ linux-2.6.32.41/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
42900 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
42901 unsigned long savelink = 1;
42902 struct timespec ctime;
42903
42904 + pax_track_stack();
42905 +
42906 /* three balancings: (1) old name removal, (2) new name insertion
42907 and (3) maybe "save" link insertion
42908 stat data updates: (1) old directory,
42909 diff -urNp linux-2.6.32.41/fs/reiserfs/procfs.c linux-2.6.32.41/fs/reiserfs/procfs.c
42910 --- linux-2.6.32.41/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
42911 +++ linux-2.6.32.41/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
42912 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
42913 "SMALL_TAILS " : "NO_TAILS ",
42914 replay_only(sb) ? "REPLAY_ONLY " : "",
42915 convert_reiserfs(sb) ? "CONV " : "",
42916 - atomic_read(&r->s_generation_counter),
42917 + atomic_read_unchecked(&r->s_generation_counter),
42918 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
42919 SF(s_do_balance), SF(s_unneeded_left_neighbor),
42920 SF(s_good_search_by_key_reada), SF(s_bmaps),
42921 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
42922 struct journal_params *jp = &rs->s_v1.s_journal;
42923 char b[BDEVNAME_SIZE];
42924
42925 + pax_track_stack();
42926 +
42927 seq_printf(m, /* on-disk fields */
42928 "jp_journal_1st_block: \t%i\n"
42929 "jp_journal_dev: \t%s[%x]\n"
42930 diff -urNp linux-2.6.32.41/fs/reiserfs/stree.c linux-2.6.32.41/fs/reiserfs/stree.c
42931 --- linux-2.6.32.41/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
42932 +++ linux-2.6.32.41/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
42933 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
42934 int iter = 0;
42935 #endif
42936
42937 + pax_track_stack();
42938 +
42939 BUG_ON(!th->t_trans_id);
42940
42941 init_tb_struct(th, &s_del_balance, sb, path,
42942 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
42943 int retval;
42944 int quota_cut_bytes = 0;
42945
42946 + pax_track_stack();
42947 +
42948 BUG_ON(!th->t_trans_id);
42949
42950 le_key2cpu_key(&cpu_key, key);
42951 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
42952 int quota_cut_bytes;
42953 loff_t tail_pos = 0;
42954
42955 + pax_track_stack();
42956 +
42957 BUG_ON(!th->t_trans_id);
42958
42959 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
42960 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
42961 int retval;
42962 int fs_gen;
42963
42964 + pax_track_stack();
42965 +
42966 BUG_ON(!th->t_trans_id);
42967
42968 fs_gen = get_generation(inode->i_sb);
42969 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
42970 int fs_gen = 0;
42971 int quota_bytes = 0;
42972
42973 + pax_track_stack();
42974 +
42975 BUG_ON(!th->t_trans_id);
42976
42977 if (inode) { /* Do we count quotas for item? */
42978 diff -urNp linux-2.6.32.41/fs/reiserfs/super.c linux-2.6.32.41/fs/reiserfs/super.c
42979 --- linux-2.6.32.41/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
42980 +++ linux-2.6.32.41/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
42981 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
42982 {.option_name = NULL}
42983 };
42984
42985 + pax_track_stack();
42986 +
42987 *blocks = 0;
42988 if (!options || !*options)
42989 /* use default configuration: create tails, journaling on, no
42990 diff -urNp linux-2.6.32.41/fs/select.c linux-2.6.32.41/fs/select.c
42991 --- linux-2.6.32.41/fs/select.c 2011-03-27 14:31:47.000000000 -0400
42992 +++ linux-2.6.32.41/fs/select.c 2011-05-16 21:46:57.000000000 -0400
42993 @@ -20,6 +20,7 @@
42994 #include <linux/module.h>
42995 #include <linux/slab.h>
42996 #include <linux/poll.h>
42997 +#include <linux/security.h>
42998 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
42999 #include <linux/file.h>
43000 #include <linux/fdtable.h>
43001 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43002 int retval, i, timed_out = 0;
43003 unsigned long slack = 0;
43004
43005 + pax_track_stack();
43006 +
43007 rcu_read_lock();
43008 retval = max_select_fd(n, fds);
43009 rcu_read_unlock();
43010 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43011 /* Allocate small arguments on the stack to save memory and be faster */
43012 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43013
43014 + pax_track_stack();
43015 +
43016 ret = -EINVAL;
43017 if (n < 0)
43018 goto out_nofds;
43019 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43020 struct poll_list *walk = head;
43021 unsigned long todo = nfds;
43022
43023 + pax_track_stack();
43024 +
43025 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43026 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43027 return -EINVAL;
43028
43029 diff -urNp linux-2.6.32.41/fs/seq_file.c linux-2.6.32.41/fs/seq_file.c
43030 --- linux-2.6.32.41/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43031 +++ linux-2.6.32.41/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43032 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43033 return 0;
43034 }
43035 if (!m->buf) {
43036 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43037 + m->size = PAGE_SIZE;
43038 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43039 if (!m->buf)
43040 return -ENOMEM;
43041 }
43042 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43043 Eoverflow:
43044 m->op->stop(m, p);
43045 kfree(m->buf);
43046 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43047 + m->size <<= 1;
43048 + m->buf = kmalloc(m->size, GFP_KERNEL);
43049 return !m->buf ? -ENOMEM : -EAGAIN;
43050 }
43051
43052 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43053 m->version = file->f_version;
43054 /* grab buffer if we didn't have one */
43055 if (!m->buf) {
43056 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43057 + m->size = PAGE_SIZE;
43058 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43059 if (!m->buf)
43060 goto Enomem;
43061 }
43062 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43063 goto Fill;
43064 m->op->stop(m, p);
43065 kfree(m->buf);
43066 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43067 + m->size <<= 1;
43068 + m->buf = kmalloc(m->size, GFP_KERNEL);
43069 if (!m->buf)
43070 goto Enomem;
43071 m->count = 0;
43072 diff -urNp linux-2.6.32.41/fs/smbfs/symlink.c linux-2.6.32.41/fs/smbfs/symlink.c
43073 --- linux-2.6.32.41/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43074 +++ linux-2.6.32.41/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43075 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43076
43077 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43078 {
43079 - char *s = nd_get_link(nd);
43080 + const char *s = nd_get_link(nd);
43081 if (!IS_ERR(s))
43082 __putname(s);
43083 }
43084 diff -urNp linux-2.6.32.41/fs/splice.c linux-2.6.32.41/fs/splice.c
43085 --- linux-2.6.32.41/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43086 +++ linux-2.6.32.41/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43087 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43088 pipe_lock(pipe);
43089
43090 for (;;) {
43091 - if (!pipe->readers) {
43092 + if (!atomic_read(&pipe->readers)) {
43093 send_sig(SIGPIPE, current, 0);
43094 if (!ret)
43095 ret = -EPIPE;
43096 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43097 do_wakeup = 0;
43098 }
43099
43100 - pipe->waiting_writers++;
43101 + atomic_inc(&pipe->waiting_writers);
43102 pipe_wait(pipe);
43103 - pipe->waiting_writers--;
43104 + atomic_dec(&pipe->waiting_writers);
43105 }
43106
43107 pipe_unlock(pipe);
43108 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43109 .spd_release = spd_release_page,
43110 };
43111
43112 + pax_track_stack();
43113 +
43114 index = *ppos >> PAGE_CACHE_SHIFT;
43115 loff = *ppos & ~PAGE_CACHE_MASK;
43116 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43117 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43118 old_fs = get_fs();
43119 set_fs(get_ds());
43120 /* The cast to a user pointer is valid due to the set_fs() */
43121 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43122 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43123 set_fs(old_fs);
43124
43125 return res;
43126 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43127 old_fs = get_fs();
43128 set_fs(get_ds());
43129 /* The cast to a user pointer is valid due to the set_fs() */
43130 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43131 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43132 set_fs(old_fs);
43133
43134 return res;
43135 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43136 .spd_release = spd_release_page,
43137 };
43138
43139 + pax_track_stack();
43140 +
43141 index = *ppos >> PAGE_CACHE_SHIFT;
43142 offset = *ppos & ~PAGE_CACHE_MASK;
43143 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43144 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43145 goto err;
43146
43147 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43148 - vec[i].iov_base = (void __user *) page_address(page);
43149 + vec[i].iov_base = (__force void __user *) page_address(page);
43150 vec[i].iov_len = this_len;
43151 pages[i] = page;
43152 spd.nr_pages++;
43153 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43154 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43155 {
43156 while (!pipe->nrbufs) {
43157 - if (!pipe->writers)
43158 + if (!atomic_read(&pipe->writers))
43159 return 0;
43160
43161 - if (!pipe->waiting_writers && sd->num_spliced)
43162 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43163 return 0;
43164
43165 if (sd->flags & SPLICE_F_NONBLOCK)
43166 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43167 * out of the pipe right after the splice_to_pipe(). So set
43168 * PIPE_READERS appropriately.
43169 */
43170 - pipe->readers = 1;
43171 + atomic_set(&pipe->readers, 1);
43172
43173 current->splice_pipe = pipe;
43174 }
43175 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43176 .spd_release = spd_release_page,
43177 };
43178
43179 + pax_track_stack();
43180 +
43181 pipe = pipe_info(file->f_path.dentry->d_inode);
43182 if (!pipe)
43183 return -EBADF;
43184 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43185 ret = -ERESTARTSYS;
43186 break;
43187 }
43188 - if (!pipe->writers)
43189 + if (!atomic_read(&pipe->writers))
43190 break;
43191 - if (!pipe->waiting_writers) {
43192 + if (!atomic_read(&pipe->waiting_writers)) {
43193 if (flags & SPLICE_F_NONBLOCK) {
43194 ret = -EAGAIN;
43195 break;
43196 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43197 pipe_lock(pipe);
43198
43199 while (pipe->nrbufs >= PIPE_BUFFERS) {
43200 - if (!pipe->readers) {
43201 + if (!atomic_read(&pipe->readers)) {
43202 send_sig(SIGPIPE, current, 0);
43203 ret = -EPIPE;
43204 break;
43205 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43206 ret = -ERESTARTSYS;
43207 break;
43208 }
43209 - pipe->waiting_writers++;
43210 + atomic_inc(&pipe->waiting_writers);
43211 pipe_wait(pipe);
43212 - pipe->waiting_writers--;
43213 + atomic_dec(&pipe->waiting_writers);
43214 }
43215
43216 pipe_unlock(pipe);
43217 @@ -1785,14 +1791,14 @@ retry:
43218 pipe_double_lock(ipipe, opipe);
43219
43220 do {
43221 - if (!opipe->readers) {
43222 + if (!atomic_read(&opipe->readers)) {
43223 send_sig(SIGPIPE, current, 0);
43224 if (!ret)
43225 ret = -EPIPE;
43226 break;
43227 }
43228
43229 - if (!ipipe->nrbufs && !ipipe->writers)
43230 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43231 break;
43232
43233 /*
43234 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43235 pipe_double_lock(ipipe, opipe);
43236
43237 do {
43238 - if (!opipe->readers) {
43239 + if (!atomic_read(&opipe->readers)) {
43240 send_sig(SIGPIPE, current, 0);
43241 if (!ret)
43242 ret = -EPIPE;
43243 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43244 * return EAGAIN if we have the potential of some data in the
43245 * future, otherwise just return 0
43246 */
43247 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43248 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43249 ret = -EAGAIN;
43250
43251 pipe_unlock(ipipe);
43252 diff -urNp linux-2.6.32.41/fs/sysfs/file.c linux-2.6.32.41/fs/sysfs/file.c
43253 --- linux-2.6.32.41/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43254 +++ linux-2.6.32.41/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43255 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43256
43257 struct sysfs_open_dirent {
43258 atomic_t refcnt;
43259 - atomic_t event;
43260 + atomic_unchecked_t event;
43261 wait_queue_head_t poll;
43262 struct list_head buffers; /* goes through sysfs_buffer.list */
43263 };
43264 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43265 size_t count;
43266 loff_t pos;
43267 char * page;
43268 - struct sysfs_ops * ops;
43269 + const struct sysfs_ops * ops;
43270 struct mutex mutex;
43271 int needs_read_fill;
43272 int event;
43273 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43274 {
43275 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43276 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43277 - struct sysfs_ops * ops = buffer->ops;
43278 + const struct sysfs_ops * ops = buffer->ops;
43279 int ret = 0;
43280 ssize_t count;
43281
43282 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43283 if (!sysfs_get_active_two(attr_sd))
43284 return -ENODEV;
43285
43286 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43287 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43288 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43289
43290 sysfs_put_active_two(attr_sd);
43291 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43292 {
43293 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43294 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43295 - struct sysfs_ops * ops = buffer->ops;
43296 + const struct sysfs_ops * ops = buffer->ops;
43297 int rc;
43298
43299 /* need attr_sd for attr and ops, its parent for kobj */
43300 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43301 return -ENOMEM;
43302
43303 atomic_set(&new_od->refcnt, 0);
43304 - atomic_set(&new_od->event, 1);
43305 + atomic_set_unchecked(&new_od->event, 1);
43306 init_waitqueue_head(&new_od->poll);
43307 INIT_LIST_HEAD(&new_od->buffers);
43308 goto retry;
43309 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43310 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43311 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43312 struct sysfs_buffer *buffer;
43313 - struct sysfs_ops *ops;
43314 + const struct sysfs_ops *ops;
43315 int error = -EACCES;
43316 char *p;
43317
43318 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43319
43320 sysfs_put_active_two(attr_sd);
43321
43322 - if (buffer->event != atomic_read(&od->event))
43323 + if (buffer->event != atomic_read_unchecked(&od->event))
43324 goto trigger;
43325
43326 return DEFAULT_POLLMASK;
43327 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43328
43329 od = sd->s_attr.open;
43330 if (od) {
43331 - atomic_inc(&od->event);
43332 + atomic_inc_unchecked(&od->event);
43333 wake_up_interruptible(&od->poll);
43334 }
43335
43336 diff -urNp linux-2.6.32.41/fs/sysfs/mount.c linux-2.6.32.41/fs/sysfs/mount.c
43337 --- linux-2.6.32.41/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43338 +++ linux-2.6.32.41/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43339 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43340 .s_name = "",
43341 .s_count = ATOMIC_INIT(1),
43342 .s_flags = SYSFS_DIR,
43343 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43344 + .s_mode = S_IFDIR | S_IRWXU,
43345 +#else
43346 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43347 +#endif
43348 .s_ino = 1,
43349 };
43350
43351 diff -urNp linux-2.6.32.41/fs/sysfs/symlink.c linux-2.6.32.41/fs/sysfs/symlink.c
43352 --- linux-2.6.32.41/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43353 +++ linux-2.6.32.41/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43354 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43355
43356 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43357 {
43358 - char *page = nd_get_link(nd);
43359 + const char *page = nd_get_link(nd);
43360 if (!IS_ERR(page))
43361 free_page((unsigned long)page);
43362 }
43363 diff -urNp linux-2.6.32.41/fs/udf/balloc.c linux-2.6.32.41/fs/udf/balloc.c
43364 --- linux-2.6.32.41/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43365 +++ linux-2.6.32.41/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43366 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43367
43368 mutex_lock(&sbi->s_alloc_mutex);
43369 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43370 - if (bloc->logicalBlockNum < 0 ||
43371 - (bloc->logicalBlockNum + count) >
43372 - partmap->s_partition_len) {
43373 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43374 udf_debug("%d < %d || %d + %d > %d\n",
43375 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43376 count, partmap->s_partition_len);
43377 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43378
43379 mutex_lock(&sbi->s_alloc_mutex);
43380 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43381 - if (bloc->logicalBlockNum < 0 ||
43382 - (bloc->logicalBlockNum + count) >
43383 - partmap->s_partition_len) {
43384 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43385 udf_debug("%d < %d || %d + %d > %d\n",
43386 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43387 partmap->s_partition_len);
43388 diff -urNp linux-2.6.32.41/fs/udf/inode.c linux-2.6.32.41/fs/udf/inode.c
43389 --- linux-2.6.32.41/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43390 +++ linux-2.6.32.41/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43391 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43392 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43393 int lastblock = 0;
43394
43395 + pax_track_stack();
43396 +
43397 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43398 prev_epos.block = iinfo->i_location;
43399 prev_epos.bh = NULL;
43400 diff -urNp linux-2.6.32.41/fs/udf/misc.c linux-2.6.32.41/fs/udf/misc.c
43401 --- linux-2.6.32.41/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43402 +++ linux-2.6.32.41/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43403 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43404
43405 u8 udf_tag_checksum(const struct tag *t)
43406 {
43407 - u8 *data = (u8 *)t;
43408 + const u8 *data = (const u8 *)t;
43409 u8 checksum = 0;
43410 int i;
43411 for (i = 0; i < sizeof(struct tag); ++i)
43412 diff -urNp linux-2.6.32.41/fs/utimes.c linux-2.6.32.41/fs/utimes.c
43413 --- linux-2.6.32.41/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43414 +++ linux-2.6.32.41/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43415 @@ -1,6 +1,7 @@
43416 #include <linux/compiler.h>
43417 #include <linux/file.h>
43418 #include <linux/fs.h>
43419 +#include <linux/security.h>
43420 #include <linux/linkage.h>
43421 #include <linux/mount.h>
43422 #include <linux/namei.h>
43423 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43424 goto mnt_drop_write_and_out;
43425 }
43426 }
43427 +
43428 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43429 + error = -EACCES;
43430 + goto mnt_drop_write_and_out;
43431 + }
43432 +
43433 mutex_lock(&inode->i_mutex);
43434 error = notify_change(path->dentry, &newattrs);
43435 mutex_unlock(&inode->i_mutex);
43436 diff -urNp linux-2.6.32.41/fs/xattr_acl.c linux-2.6.32.41/fs/xattr_acl.c
43437 --- linux-2.6.32.41/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43438 +++ linux-2.6.32.41/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43439 @@ -17,8 +17,8 @@
43440 struct posix_acl *
43441 posix_acl_from_xattr(const void *value, size_t size)
43442 {
43443 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43444 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43445 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43446 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43447 int count;
43448 struct posix_acl *acl;
43449 struct posix_acl_entry *acl_e;
43450 diff -urNp linux-2.6.32.41/fs/xattr.c linux-2.6.32.41/fs/xattr.c
43451 --- linux-2.6.32.41/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43452 +++ linux-2.6.32.41/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43453 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43454 * Extended attribute SET operations
43455 */
43456 static long
43457 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43458 +setxattr(struct path *path, const char __user *name, const void __user *value,
43459 size_t size, int flags)
43460 {
43461 int error;
43462 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43463 return PTR_ERR(kvalue);
43464 }
43465
43466 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43467 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43468 + error = -EACCES;
43469 + goto out;
43470 + }
43471 +
43472 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43473 +out:
43474 kfree(kvalue);
43475 return error;
43476 }
43477 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43478 return error;
43479 error = mnt_want_write(path.mnt);
43480 if (!error) {
43481 - error = setxattr(path.dentry, name, value, size, flags);
43482 + error = setxattr(&path, name, value, size, flags);
43483 mnt_drop_write(path.mnt);
43484 }
43485 path_put(&path);
43486 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43487 return error;
43488 error = mnt_want_write(path.mnt);
43489 if (!error) {
43490 - error = setxattr(path.dentry, name, value, size, flags);
43491 + error = setxattr(&path, name, value, size, flags);
43492 mnt_drop_write(path.mnt);
43493 }
43494 path_put(&path);
43495 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43496 const void __user *,value, size_t, size, int, flags)
43497 {
43498 struct file *f;
43499 - struct dentry *dentry;
43500 int error = -EBADF;
43501
43502 f = fget(fd);
43503 if (!f)
43504 return error;
43505 - dentry = f->f_path.dentry;
43506 - audit_inode(NULL, dentry);
43507 + audit_inode(NULL, f->f_path.dentry);
43508 error = mnt_want_write_file(f);
43509 if (!error) {
43510 - error = setxattr(dentry, name, value, size, flags);
43511 + error = setxattr(&f->f_path, name, value, size, flags);
43512 mnt_drop_write(f->f_path.mnt);
43513 }
43514 fput(f);
43515 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c
43516 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43517 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43518 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43519 xfs_fsop_geom_t fsgeo;
43520 int error;
43521
43522 + memset(&fsgeo, 0, sizeof(fsgeo));
43523 error = xfs_fs_geometry(mp, &fsgeo, 3);
43524 if (error)
43525 return -error;
43526 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c
43527 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43528 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43529 @@ -134,7 +134,7 @@ xfs_find_handle(
43530 }
43531
43532 error = -EFAULT;
43533 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43534 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43535 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43536 goto out_put;
43537
43538 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43539 if (IS_ERR(dentry))
43540 return PTR_ERR(dentry);
43541
43542 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43543 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43544 if (!kbuf)
43545 goto out_dput;
43546
43547 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43548 xfs_mount_t *mp,
43549 void __user *arg)
43550 {
43551 - xfs_fsop_geom_t fsgeo;
43552 + xfs_fsop_geom_t fsgeo;
43553 int error;
43554
43555 error = xfs_fs_geometry(mp, &fsgeo, 3);
43556 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c
43557 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43558 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43559 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43560 struct nameidata *nd,
43561 void *p)
43562 {
43563 - char *s = nd_get_link(nd);
43564 + const char *s = nd_get_link(nd);
43565
43566 if (!IS_ERR(s))
43567 kfree(s);
43568 diff -urNp linux-2.6.32.41/fs/xfs/xfs_bmap.c linux-2.6.32.41/fs/xfs/xfs_bmap.c
43569 --- linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43570 +++ linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43571 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43572 int nmap,
43573 int ret_nmap);
43574 #else
43575 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43576 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43577 #endif /* DEBUG */
43578
43579 #if defined(XFS_RW_TRACE)
43580 diff -urNp linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c
43581 --- linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43582 +++ linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43583 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43584 }
43585
43586 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43587 - if (filldir(dirent, sfep->name, sfep->namelen,
43588 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43589 + char name[sfep->namelen];
43590 + memcpy(name, sfep->name, sfep->namelen);
43591 + if (filldir(dirent, name, sfep->namelen,
43592 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43593 + *offset = off & 0x7fffffff;
43594 + return 0;
43595 + }
43596 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43597 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43598 *offset = off & 0x7fffffff;
43599 return 0;
43600 diff -urNp linux-2.6.32.41/grsecurity/gracl_alloc.c linux-2.6.32.41/grsecurity/gracl_alloc.c
43601 --- linux-2.6.32.41/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43602 +++ linux-2.6.32.41/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43603 @@ -0,0 +1,105 @@
43604 +#include <linux/kernel.h>
43605 +#include <linux/mm.h>
43606 +#include <linux/slab.h>
43607 +#include <linux/vmalloc.h>
43608 +#include <linux/gracl.h>
43609 +#include <linux/grsecurity.h>
43610 +
43611 +static unsigned long alloc_stack_next = 1;
43612 +static unsigned long alloc_stack_size = 1;
43613 +static void **alloc_stack;
43614 +
43615 +static __inline__ int
43616 +alloc_pop(void)
43617 +{
43618 + if (alloc_stack_next == 1)
43619 + return 0;
43620 +
43621 + kfree(alloc_stack[alloc_stack_next - 2]);
43622 +
43623 + alloc_stack_next--;
43624 +
43625 + return 1;
43626 +}
43627 +
43628 +static __inline__ int
43629 +alloc_push(void *buf)
43630 +{
43631 + if (alloc_stack_next >= alloc_stack_size)
43632 + return 1;
43633 +
43634 + alloc_stack[alloc_stack_next - 1] = buf;
43635 +
43636 + alloc_stack_next++;
43637 +
43638 + return 0;
43639 +}
43640 +
43641 +void *
43642 +acl_alloc(unsigned long len)
43643 +{
43644 + void *ret = NULL;
43645 +
43646 + if (!len || len > PAGE_SIZE)
43647 + goto out;
43648 +
43649 + ret = kmalloc(len, GFP_KERNEL);
43650 +
43651 + if (ret) {
43652 + if (alloc_push(ret)) {
43653 + kfree(ret);
43654 + ret = NULL;
43655 + }
43656 + }
43657 +
43658 +out:
43659 + return ret;
43660 +}
43661 +
43662 +void *
43663 +acl_alloc_num(unsigned long num, unsigned long len)
43664 +{
43665 + if (!len || (num > (PAGE_SIZE / len)))
43666 + return NULL;
43667 +
43668 + return acl_alloc(num * len);
43669 +}
43670 +
43671 +void
43672 +acl_free_all(void)
43673 +{
43674 + if (gr_acl_is_enabled() || !alloc_stack)
43675 + return;
43676 +
43677 + while (alloc_pop()) ;
43678 +
43679 + if (alloc_stack) {
43680 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43681 + kfree(alloc_stack);
43682 + else
43683 + vfree(alloc_stack);
43684 + }
43685 +
43686 + alloc_stack = NULL;
43687 + alloc_stack_size = 1;
43688 + alloc_stack_next = 1;
43689 +
43690 + return;
43691 +}
43692 +
43693 +int
43694 +acl_alloc_stack_init(unsigned long size)
43695 +{
43696 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43697 + alloc_stack =
43698 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43699 + else
43700 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43701 +
43702 + alloc_stack_size = size;
43703 +
43704 + if (!alloc_stack)
43705 + return 0;
43706 + else
43707 + return 1;
43708 +}
43709 diff -urNp linux-2.6.32.41/grsecurity/gracl.c linux-2.6.32.41/grsecurity/gracl.c
43710 --- linux-2.6.32.41/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43711 +++ linux-2.6.32.41/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
43712 @@ -0,0 +1,4085 @@
43713 +#include <linux/kernel.h>
43714 +#include <linux/module.h>
43715 +#include <linux/sched.h>
43716 +#include <linux/mm.h>
43717 +#include <linux/file.h>
43718 +#include <linux/fs.h>
43719 +#include <linux/namei.h>
43720 +#include <linux/mount.h>
43721 +#include <linux/tty.h>
43722 +#include <linux/proc_fs.h>
43723 +#include <linux/smp_lock.h>
43724 +#include <linux/slab.h>
43725 +#include <linux/vmalloc.h>
43726 +#include <linux/types.h>
43727 +#include <linux/sysctl.h>
43728 +#include <linux/netdevice.h>
43729 +#include <linux/ptrace.h>
43730 +#include <linux/gracl.h>
43731 +#include <linux/gralloc.h>
43732 +#include <linux/grsecurity.h>
43733 +#include <linux/grinternal.h>
43734 +#include <linux/pid_namespace.h>
43735 +#include <linux/fdtable.h>
43736 +#include <linux/percpu.h>
43737 +
43738 +#include <asm/uaccess.h>
43739 +#include <asm/errno.h>
43740 +#include <asm/mman.h>
43741 +
43742 +static struct acl_role_db acl_role_set;
43743 +static struct name_db name_set;
43744 +static struct inodev_db inodev_set;
43745 +
43746 +/* for keeping track of userspace pointers used for subjects, so we
43747 + can share references in the kernel as well
43748 +*/
43749 +
43750 +static struct dentry *real_root;
43751 +static struct vfsmount *real_root_mnt;
43752 +
43753 +static struct acl_subj_map_db subj_map_set;
43754 +
43755 +static struct acl_role_label *default_role;
43756 +
43757 +static struct acl_role_label *role_list;
43758 +
43759 +static u16 acl_sp_role_value;
43760 +
43761 +extern char *gr_shared_page[4];
43762 +static DEFINE_MUTEX(gr_dev_mutex);
43763 +DEFINE_RWLOCK(gr_inode_lock);
43764 +
43765 +struct gr_arg *gr_usermode;
43766 +
43767 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
43768 +
43769 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43770 +extern void gr_clear_learn_entries(void);
43771 +
43772 +#ifdef CONFIG_GRKERNSEC_RESLOG
43773 +extern void gr_log_resource(const struct task_struct *task,
43774 + const int res, const unsigned long wanted, const int gt);
43775 +#endif
43776 +
43777 +unsigned char *gr_system_salt;
43778 +unsigned char *gr_system_sum;
43779 +
43780 +static struct sprole_pw **acl_special_roles = NULL;
43781 +static __u16 num_sprole_pws = 0;
43782 +
43783 +static struct acl_role_label *kernel_role = NULL;
43784 +
43785 +static unsigned int gr_auth_attempts = 0;
43786 +static unsigned long gr_auth_expires = 0UL;
43787 +
43788 +#ifdef CONFIG_NET
43789 +extern struct vfsmount *sock_mnt;
43790 +#endif
43791 +extern struct vfsmount *pipe_mnt;
43792 +extern struct vfsmount *shm_mnt;
43793 +#ifdef CONFIG_HUGETLBFS
43794 +extern struct vfsmount *hugetlbfs_vfsmount;
43795 +#endif
43796 +
43797 +static struct acl_object_label *fakefs_obj_rw;
43798 +static struct acl_object_label *fakefs_obj_rwx;
43799 +
43800 +extern int gr_init_uidset(void);
43801 +extern void gr_free_uidset(void);
43802 +extern void gr_remove_uid(uid_t uid);
43803 +extern int gr_find_uid(uid_t uid);
43804 +
43805 +__inline__ int
43806 +gr_acl_is_enabled(void)
43807 +{
43808 + return (gr_status & GR_READY);
43809 +}
43810 +
43811 +#ifdef CONFIG_BTRFS_FS
43812 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43813 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43814 +#endif
43815 +
43816 +static inline dev_t __get_dev(const struct dentry *dentry)
43817 +{
43818 +#ifdef CONFIG_BTRFS_FS
43819 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43820 + return get_btrfs_dev_from_inode(dentry->d_inode);
43821 + else
43822 +#endif
43823 + return dentry->d_inode->i_sb->s_dev;
43824 +}
43825 +
43826 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43827 +{
43828 + return __get_dev(dentry);
43829 +}
43830 +
43831 +static char gr_task_roletype_to_char(struct task_struct *task)
43832 +{
43833 + switch (task->role->roletype &
43834 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43835 + GR_ROLE_SPECIAL)) {
43836 + case GR_ROLE_DEFAULT:
43837 + return 'D';
43838 + case GR_ROLE_USER:
43839 + return 'U';
43840 + case GR_ROLE_GROUP:
43841 + return 'G';
43842 + case GR_ROLE_SPECIAL:
43843 + return 'S';
43844 + }
43845 +
43846 + return 'X';
43847 +}
43848 +
43849 +char gr_roletype_to_char(void)
43850 +{
43851 + return gr_task_roletype_to_char(current);
43852 +}
43853 +
43854 +__inline__ int
43855 +gr_acl_tpe_check(void)
43856 +{
43857 + if (unlikely(!(gr_status & GR_READY)))
43858 + return 0;
43859 + if (current->role->roletype & GR_ROLE_TPE)
43860 + return 1;
43861 + else
43862 + return 0;
43863 +}
43864 +
43865 +int
43866 +gr_handle_rawio(const struct inode *inode)
43867 +{
43868 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43869 + if (inode && S_ISBLK(inode->i_mode) &&
43870 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43871 + !capable(CAP_SYS_RAWIO))
43872 + return 1;
43873 +#endif
43874 + return 0;
43875 +}
43876 +
43877 +static int
43878 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43879 +{
43880 + if (likely(lena != lenb))
43881 + return 0;
43882 +
43883 + return !memcmp(a, b, lena);
43884 +}
43885 +
43886 +/* this must be called with vfsmount_lock and dcache_lock held */
43887 +
43888 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43889 + struct dentry *root, struct vfsmount *rootmnt,
43890 + char *buffer, int buflen)
43891 +{
43892 + char * end = buffer+buflen;
43893 + char * retval;
43894 + int namelen;
43895 +
43896 + *--end = '\0';
43897 + buflen--;
43898 +
43899 + if (buflen < 1)
43900 + goto Elong;
43901 + /* Get '/' right */
43902 + retval = end-1;
43903 + *retval = '/';
43904 +
43905 + for (;;) {
43906 + struct dentry * parent;
43907 +
43908 + if (dentry == root && vfsmnt == rootmnt)
43909 + break;
43910 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
43911 + /* Global root? */
43912 + if (vfsmnt->mnt_parent == vfsmnt)
43913 + goto global_root;
43914 + dentry = vfsmnt->mnt_mountpoint;
43915 + vfsmnt = vfsmnt->mnt_parent;
43916 + continue;
43917 + }
43918 + parent = dentry->d_parent;
43919 + prefetch(parent);
43920 + namelen = dentry->d_name.len;
43921 + buflen -= namelen + 1;
43922 + if (buflen < 0)
43923 + goto Elong;
43924 + end -= namelen;
43925 + memcpy(end, dentry->d_name.name, namelen);
43926 + *--end = '/';
43927 + retval = end;
43928 + dentry = parent;
43929 + }
43930 +
43931 +out:
43932 + return retval;
43933 +
43934 +global_root:
43935 + namelen = dentry->d_name.len;
43936 + buflen -= namelen;
43937 + if (buflen < 0)
43938 + goto Elong;
43939 + retval -= namelen-1; /* hit the slash */
43940 + memcpy(retval, dentry->d_name.name, namelen);
43941 + goto out;
43942 +Elong:
43943 + retval = ERR_PTR(-ENAMETOOLONG);
43944 + goto out;
43945 +}
43946 +
43947 +static char *
43948 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43949 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
43950 +{
43951 + char *retval;
43952 +
43953 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
43954 + if (unlikely(IS_ERR(retval)))
43955 + retval = strcpy(buf, "<path too long>");
43956 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
43957 + retval[1] = '\0';
43958 +
43959 + return retval;
43960 +}
43961 +
43962 +static char *
43963 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43964 + char *buf, int buflen)
43965 +{
43966 + char *res;
43967 +
43968 + /* we can use real_root, real_root_mnt, because this is only called
43969 + by the RBAC system */
43970 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
43971 +
43972 + return res;
43973 +}
43974 +
43975 +static char *
43976 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43977 + char *buf, int buflen)
43978 +{
43979 + char *res;
43980 + struct dentry *root;
43981 + struct vfsmount *rootmnt;
43982 + struct task_struct *reaper = &init_task;
43983 +
43984 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
43985 + read_lock(&reaper->fs->lock);
43986 + root = dget(reaper->fs->root.dentry);
43987 + rootmnt = mntget(reaper->fs->root.mnt);
43988 + read_unlock(&reaper->fs->lock);
43989 +
43990 + spin_lock(&dcache_lock);
43991 + spin_lock(&vfsmount_lock);
43992 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
43993 + spin_unlock(&vfsmount_lock);
43994 + spin_unlock(&dcache_lock);
43995 +
43996 + dput(root);
43997 + mntput(rootmnt);
43998 + return res;
43999 +}
44000 +
44001 +static char *
44002 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44003 +{
44004 + char *ret;
44005 + spin_lock(&dcache_lock);
44006 + spin_lock(&vfsmount_lock);
44007 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44008 + PAGE_SIZE);
44009 + spin_unlock(&vfsmount_lock);
44010 + spin_unlock(&dcache_lock);
44011 + return ret;
44012 +}
44013 +
44014 +char *
44015 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44016 +{
44017 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44018 + PAGE_SIZE);
44019 +}
44020 +
44021 +char *
44022 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44023 +{
44024 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44025 + PAGE_SIZE);
44026 +}
44027 +
44028 +char *
44029 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44030 +{
44031 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44032 + PAGE_SIZE);
44033 +}
44034 +
44035 +char *
44036 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44037 +{
44038 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44039 + PAGE_SIZE);
44040 +}
44041 +
44042 +char *
44043 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44044 +{
44045 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44046 + PAGE_SIZE);
44047 +}
44048 +
44049 +__inline__ __u32
44050 +to_gr_audit(const __u32 reqmode)
44051 +{
44052 + /* masks off auditable permission flags, then shifts them to create
44053 + auditing flags, and adds the special case of append auditing if
44054 + we're requesting write */
44055 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44056 +}
44057 +
44058 +struct acl_subject_label *
44059 +lookup_subject_map(const struct acl_subject_label *userp)
44060 +{
44061 + unsigned int index = shash(userp, subj_map_set.s_size);
44062 + struct subject_map *match;
44063 +
44064 + match = subj_map_set.s_hash[index];
44065 +
44066 + while (match && match->user != userp)
44067 + match = match->next;
44068 +
44069 + if (match != NULL)
44070 + return match->kernel;
44071 + else
44072 + return NULL;
44073 +}
44074 +
44075 +static void
44076 +insert_subj_map_entry(struct subject_map *subjmap)
44077 +{
44078 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44079 + struct subject_map **curr;
44080 +
44081 + subjmap->prev = NULL;
44082 +
44083 + curr = &subj_map_set.s_hash[index];
44084 + if (*curr != NULL)
44085 + (*curr)->prev = subjmap;
44086 +
44087 + subjmap->next = *curr;
44088 + *curr = subjmap;
44089 +
44090 + return;
44091 +}
44092 +
44093 +static struct acl_role_label *
44094 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44095 + const gid_t gid)
44096 +{
44097 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44098 + struct acl_role_label *match;
44099 + struct role_allowed_ip *ipp;
44100 + unsigned int x;
44101 + u32 curr_ip = task->signal->curr_ip;
44102 +
44103 + task->signal->saved_ip = curr_ip;
44104 +
44105 + match = acl_role_set.r_hash[index];
44106 +
44107 + while (match) {
44108 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44109 + for (x = 0; x < match->domain_child_num; x++) {
44110 + if (match->domain_children[x] == uid)
44111 + goto found;
44112 + }
44113 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44114 + break;
44115 + match = match->next;
44116 + }
44117 +found:
44118 + if (match == NULL) {
44119 + try_group:
44120 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44121 + match = acl_role_set.r_hash[index];
44122 +
44123 + while (match) {
44124 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44125 + for (x = 0; x < match->domain_child_num; x++) {
44126 + if (match->domain_children[x] == gid)
44127 + goto found2;
44128 + }
44129 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44130 + break;
44131 + match = match->next;
44132 + }
44133 +found2:
44134 + if (match == NULL)
44135 + match = default_role;
44136 + if (match->allowed_ips == NULL)
44137 + return match;
44138 + else {
44139 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44140 + if (likely
44141 + ((ntohl(curr_ip) & ipp->netmask) ==
44142 + (ntohl(ipp->addr) & ipp->netmask)))
44143 + return match;
44144 + }
44145 + match = default_role;
44146 + }
44147 + } else if (match->allowed_ips == NULL) {
44148 + return match;
44149 + } else {
44150 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44151 + if (likely
44152 + ((ntohl(curr_ip) & ipp->netmask) ==
44153 + (ntohl(ipp->addr) & ipp->netmask)))
44154 + return match;
44155 + }
44156 + goto try_group;
44157 + }
44158 +
44159 + return match;
44160 +}
44161 +
44162 +struct acl_subject_label *
44163 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44164 + const struct acl_role_label *role)
44165 +{
44166 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44167 + struct acl_subject_label *match;
44168 +
44169 + match = role->subj_hash[index];
44170 +
44171 + while (match && (match->inode != ino || match->device != dev ||
44172 + (match->mode & GR_DELETED))) {
44173 + match = match->next;
44174 + }
44175 +
44176 + if (match && !(match->mode & GR_DELETED))
44177 + return match;
44178 + else
44179 + return NULL;
44180 +}
44181 +
44182 +struct acl_subject_label *
44183 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44184 + const struct acl_role_label *role)
44185 +{
44186 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44187 + struct acl_subject_label *match;
44188 +
44189 + match = role->subj_hash[index];
44190 +
44191 + while (match && (match->inode != ino || match->device != dev ||
44192 + !(match->mode & GR_DELETED))) {
44193 + match = match->next;
44194 + }
44195 +
44196 + if (match && (match->mode & GR_DELETED))
44197 + return match;
44198 + else
44199 + return NULL;
44200 +}
44201 +
44202 +static struct acl_object_label *
44203 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44204 + const struct acl_subject_label *subj)
44205 +{
44206 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44207 + struct acl_object_label *match;
44208 +
44209 + match = subj->obj_hash[index];
44210 +
44211 + while (match && (match->inode != ino || match->device != dev ||
44212 + (match->mode & GR_DELETED))) {
44213 + match = match->next;
44214 + }
44215 +
44216 + if (match && !(match->mode & GR_DELETED))
44217 + return match;
44218 + else
44219 + return NULL;
44220 +}
44221 +
44222 +static struct acl_object_label *
44223 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44224 + const struct acl_subject_label *subj)
44225 +{
44226 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44227 + struct acl_object_label *match;
44228 +
44229 + match = subj->obj_hash[index];
44230 +
44231 + while (match && (match->inode != ino || match->device != dev ||
44232 + !(match->mode & GR_DELETED))) {
44233 + match = match->next;
44234 + }
44235 +
44236 + if (match && (match->mode & GR_DELETED))
44237 + return match;
44238 +
44239 + match = subj->obj_hash[index];
44240 +
44241 + while (match && (match->inode != ino || match->device != dev ||
44242 + (match->mode & GR_DELETED))) {
44243 + match = match->next;
44244 + }
44245 +
44246 + if (match && !(match->mode & GR_DELETED))
44247 + return match;
44248 + else
44249 + return NULL;
44250 +}
44251 +
44252 +static struct name_entry *
44253 +lookup_name_entry(const char *name)
44254 +{
44255 + unsigned int len = strlen(name);
44256 + unsigned int key = full_name_hash(name, len);
44257 + unsigned int index = key % name_set.n_size;
44258 + struct name_entry *match;
44259 +
44260 + match = name_set.n_hash[index];
44261 +
44262 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44263 + match = match->next;
44264 +
44265 + return match;
44266 +}
44267 +
44268 +static struct name_entry *
44269 +lookup_name_entry_create(const char *name)
44270 +{
44271 + unsigned int len = strlen(name);
44272 + unsigned int key = full_name_hash(name, len);
44273 + unsigned int index = key % name_set.n_size;
44274 + struct name_entry *match;
44275 +
44276 + match = name_set.n_hash[index];
44277 +
44278 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44279 + !match->deleted))
44280 + match = match->next;
44281 +
44282 + if (match && match->deleted)
44283 + return match;
44284 +
44285 + match = name_set.n_hash[index];
44286 +
44287 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44288 + match->deleted))
44289 + match = match->next;
44290 +
44291 + if (match && !match->deleted)
44292 + return match;
44293 + else
44294 + return NULL;
44295 +}
44296 +
44297 +static struct inodev_entry *
44298 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44299 +{
44300 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44301 + struct inodev_entry *match;
44302 +
44303 + match = inodev_set.i_hash[index];
44304 +
44305 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44306 + match = match->next;
44307 +
44308 + return match;
44309 +}
44310 +
44311 +static void
44312 +insert_inodev_entry(struct inodev_entry *entry)
44313 +{
44314 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44315 + inodev_set.i_size);
44316 + struct inodev_entry **curr;
44317 +
44318 + entry->prev = NULL;
44319 +
44320 + curr = &inodev_set.i_hash[index];
44321 + if (*curr != NULL)
44322 + (*curr)->prev = entry;
44323 +
44324 + entry->next = *curr;
44325 + *curr = entry;
44326 +
44327 + return;
44328 +}
44329 +
44330 +static void
44331 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44332 +{
44333 + unsigned int index =
44334 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44335 + struct acl_role_label **curr;
44336 + struct acl_role_label *tmp;
44337 +
44338 + curr = &acl_role_set.r_hash[index];
44339 +
44340 + /* if role was already inserted due to domains and already has
44341 + a role in the same bucket as it attached, then we need to
44342 + combine these two buckets
44343 + */
44344 + if (role->next) {
44345 + tmp = role->next;
44346 + while (tmp->next)
44347 + tmp = tmp->next;
44348 + tmp->next = *curr;
44349 + } else
44350 + role->next = *curr;
44351 + *curr = role;
44352 +
44353 + return;
44354 +}
44355 +
44356 +static void
44357 +insert_acl_role_label(struct acl_role_label *role)
44358 +{
44359 + int i;
44360 +
44361 + if (role_list == NULL) {
44362 + role_list = role;
44363 + role->prev = NULL;
44364 + } else {
44365 + role->prev = role_list;
44366 + role_list = role;
44367 + }
44368 +
44369 + /* used for hash chains */
44370 + role->next = NULL;
44371 +
44372 + if (role->roletype & GR_ROLE_DOMAIN) {
44373 + for (i = 0; i < role->domain_child_num; i++)
44374 + __insert_acl_role_label(role, role->domain_children[i]);
44375 + } else
44376 + __insert_acl_role_label(role, role->uidgid);
44377 +}
44378 +
44379 +static int
44380 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44381 +{
44382 + struct name_entry **curr, *nentry;
44383 + struct inodev_entry *ientry;
44384 + unsigned int len = strlen(name);
44385 + unsigned int key = full_name_hash(name, len);
44386 + unsigned int index = key % name_set.n_size;
44387 +
44388 + curr = &name_set.n_hash[index];
44389 +
44390 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44391 + curr = &((*curr)->next);
44392 +
44393 + if (*curr != NULL)
44394 + return 1;
44395 +
44396 + nentry = acl_alloc(sizeof (struct name_entry));
44397 + if (nentry == NULL)
44398 + return 0;
44399 + ientry = acl_alloc(sizeof (struct inodev_entry));
44400 + if (ientry == NULL)
44401 + return 0;
44402 + ientry->nentry = nentry;
44403 +
44404 + nentry->key = key;
44405 + nentry->name = name;
44406 + nentry->inode = inode;
44407 + nentry->device = device;
44408 + nentry->len = len;
44409 + nentry->deleted = deleted;
44410 +
44411 + nentry->prev = NULL;
44412 + curr = &name_set.n_hash[index];
44413 + if (*curr != NULL)
44414 + (*curr)->prev = nentry;
44415 + nentry->next = *curr;
44416 + *curr = nentry;
44417 +
44418 + /* insert us into the table searchable by inode/dev */
44419 + insert_inodev_entry(ientry);
44420 +
44421 + return 1;
44422 +}
44423 +
44424 +static void
44425 +insert_acl_obj_label(struct acl_object_label *obj,
44426 + struct acl_subject_label *subj)
44427 +{
44428 + unsigned int index =
44429 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44430 + struct acl_object_label **curr;
44431 +
44432 +
44433 + obj->prev = NULL;
44434 +
44435 + curr = &subj->obj_hash[index];
44436 + if (*curr != NULL)
44437 + (*curr)->prev = obj;
44438 +
44439 + obj->next = *curr;
44440 + *curr = obj;
44441 +
44442 + return;
44443 +}
44444 +
44445 +static void
44446 +insert_acl_subj_label(struct acl_subject_label *obj,
44447 + struct acl_role_label *role)
44448 +{
44449 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44450 + struct acl_subject_label **curr;
44451 +
44452 + obj->prev = NULL;
44453 +
44454 + curr = &role->subj_hash[index];
44455 + if (*curr != NULL)
44456 + (*curr)->prev = obj;
44457 +
44458 + obj->next = *curr;
44459 + *curr = obj;
44460 +
44461 + return;
44462 +}
44463 +
44464 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44465 +
44466 +static void *
44467 +create_table(__u32 * len, int elementsize)
44468 +{
44469 + unsigned int table_sizes[] = {
44470 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44471 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44472 + 4194301, 8388593, 16777213, 33554393, 67108859
44473 + };
44474 + void *newtable = NULL;
44475 + unsigned int pwr = 0;
44476 +
44477 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44478 + table_sizes[pwr] <= *len)
44479 + pwr++;
44480 +
44481 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44482 + return newtable;
44483 +
44484 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44485 + newtable =
44486 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44487 + else
44488 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44489 +
44490 + *len = table_sizes[pwr];
44491 +
44492 + return newtable;
44493 +}
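
[Editor's note] create_table() above picks the smallest prime in its table that strictly exceeds the requested element count (keeping the chained hashes near a load factor of one, per the "lambda ~ 1" comment), allocates with kmalloc() when the table fits in a page and with vmalloc() otherwise, and writes the size actually chosen back through *len; the kernel version also guards against the byte count overflowing. A rough user-space sketch of the same size-selection logic, with calloc standing in for both kernel allocators:

#include <stdio.h>
#include <stdlib.h>

/* same idea as create_table(): round a requested bucket count up to a prime */
static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

static void *make_table(unsigned int *len, size_t elementsize)
{
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= *len)
		pwr++;

	if (table_sizes[pwr] <= *len)           /* request too large for the table */
		return NULL;

	*len = table_sizes[pwr];                /* report the size actually used */
	return calloc(*len, elementsize);       /* kernel code picks kmalloc/vmalloc here */
}

int main(void)
{
	unsigned int want = 100;                /* e.g. number of subjects in the policy */
	void **table = make_table(&want, sizeof(void *));

	printf("allocated %u buckets (%s)\n", want, table ? "ok" : "failed");
	free(table);
	return 0;
}
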
44494 +
44495 +static int
44496 +init_variables(const struct gr_arg *arg)
44497 +{
44498 + struct task_struct *reaper = &init_task;
44499 + unsigned int stacksize;
44500 +
44501 + subj_map_set.s_size = arg->role_db.num_subjects;
44502 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44503 + name_set.n_size = arg->role_db.num_objects;
44504 + inodev_set.i_size = arg->role_db.num_objects;
44505 +
44506 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44507 + !name_set.n_size || !inodev_set.i_size)
44508 + return 1;
44509 +
44510 + if (!gr_init_uidset())
44511 + return 1;
44512 +
44513 + /* set up the stack that holds allocation info */
44514 +
44515 + stacksize = arg->role_db.num_pointers + 5;
44516 +
44517 + if (!acl_alloc_stack_init(stacksize))
44518 + return 1;
44519 +
44520 + /* grab reference for the real root dentry and vfsmount */
44521 + read_lock(&reaper->fs->lock);
44522 + real_root = dget(reaper->fs->root.dentry);
44523 + real_root_mnt = mntget(reaper->fs->root.mnt);
44524 + read_unlock(&reaper->fs->lock);
44525 +
44526 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44527 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44528 +#endif
44529 +
44530 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44531 + if (fakefs_obj_rw == NULL)
44532 + return 1;
44533 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44534 +
44535 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44536 + if (fakefs_obj_rwx == NULL)
44537 + return 1;
44538 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44539 +
44540 + subj_map_set.s_hash =
44541 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44542 + acl_role_set.r_hash =
44543 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44544 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44545 + inodev_set.i_hash =
44546 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44547 +
44548 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44549 + !name_set.n_hash || !inodev_set.i_hash)
44550 + return 1;
44551 +
44552 + memset(subj_map_set.s_hash, 0,
44553 + sizeof(struct subject_map *) * subj_map_set.s_size);
44554 + memset(acl_role_set.r_hash, 0,
44555 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44556 + memset(name_set.n_hash, 0,
44557 + sizeof (struct name_entry *) * name_set.n_size);
44558 + memset(inodev_set.i_hash, 0,
44559 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44560 +
44561 + return 0;
44562 +}
44563 +
44564 +/* free information not needed after startup
44565 + currently contains user->kernel pointer mappings for subjects
44566 +*/
44567 +
44568 +static void
44569 +free_init_variables(void)
44570 +{
44571 + __u32 i;
44572 +
44573 + if (subj_map_set.s_hash) {
44574 + for (i = 0; i < subj_map_set.s_size; i++) {
44575 + if (subj_map_set.s_hash[i]) {
44576 + kfree(subj_map_set.s_hash[i]);
44577 + subj_map_set.s_hash[i] = NULL;
44578 + }
44579 + }
44580 +
44581 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44582 + PAGE_SIZE)
44583 + kfree(subj_map_set.s_hash);
44584 + else
44585 + vfree(subj_map_set.s_hash);
44586 + }
44587 +
44588 + return;
44589 +}
44590 +
44591 +static void
44592 +free_variables(void)
44593 +{
44594 + struct acl_subject_label *s;
44595 + struct acl_role_label *r;
44596 + struct task_struct *task, *task2;
44597 + unsigned int x;
44598 +
44599 + gr_clear_learn_entries();
44600 +
44601 + read_lock(&tasklist_lock);
44602 + do_each_thread(task2, task) {
44603 + task->acl_sp_role = 0;
44604 + task->acl_role_id = 0;
44605 + task->acl = NULL;
44606 + task->role = NULL;
44607 + } while_each_thread(task2, task);
44608 + read_unlock(&tasklist_lock);
44609 +
44610 + /* release the reference to the real root dentry and vfsmount */
44611 + if (real_root)
44612 + dput(real_root);
44613 + real_root = NULL;
44614 + if (real_root_mnt)
44615 + mntput(real_root_mnt);
44616 + real_root_mnt = NULL;
44617 +
44618 + /* free all object hash tables */
44619 +
44620 + FOR_EACH_ROLE_START(r)
44621 + if (r->subj_hash == NULL)
44622 + goto next_role;
44623 + FOR_EACH_SUBJECT_START(r, s, x)
44624 + if (s->obj_hash == NULL)
44625 + break;
44626 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44627 + kfree(s->obj_hash);
44628 + else
44629 + vfree(s->obj_hash);
44630 + FOR_EACH_SUBJECT_END(s, x)
44631 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44632 + if (s->obj_hash == NULL)
44633 + break;
44634 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44635 + kfree(s->obj_hash);
44636 + else
44637 + vfree(s->obj_hash);
44638 + FOR_EACH_NESTED_SUBJECT_END(s)
44639 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44640 + kfree(r->subj_hash);
44641 + else
44642 + vfree(r->subj_hash);
44643 + r->subj_hash = NULL;
44644 +next_role:
44645 + FOR_EACH_ROLE_END(r)
44646 +
44647 + acl_free_all();
44648 +
44649 + if (acl_role_set.r_hash) {
44650 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44651 + PAGE_SIZE)
44652 + kfree(acl_role_set.r_hash);
44653 + else
44654 + vfree(acl_role_set.r_hash);
44655 + }
44656 + if (name_set.n_hash) {
44657 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44658 + PAGE_SIZE)
44659 + kfree(name_set.n_hash);
44660 + else
44661 + vfree(name_set.n_hash);
44662 + }
44663 +
44664 + if (inodev_set.i_hash) {
44665 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44666 + PAGE_SIZE)
44667 + kfree(inodev_set.i_hash);
44668 + else
44669 + vfree(inodev_set.i_hash);
44670 + }
44671 +
44672 + gr_free_uidset();
44673 +
44674 + memset(&name_set, 0, sizeof (struct name_db));
44675 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44676 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44677 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44678 +
44679 + default_role = NULL;
44680 + role_list = NULL;
44681 +
44682 + return;
44683 +}
44684 +
44685 +static __u32
44686 +count_user_objs(struct acl_object_label *userp)
44687 +{
44688 + struct acl_object_label o_tmp;
44689 + __u32 num = 0;
44690 +
44691 + while (userp) {
44692 + if (copy_from_user(&o_tmp, userp,
44693 + sizeof (struct acl_object_label)))
44694 + break;
44695 +
44696 + userp = o_tmp.prev;
44697 + num++;
44698 + }
44699 +
44700 + return num;
44701 +}
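
[Editor's note] count_user_objs() walks a singly linked list that still lives entirely in user memory: each node is copied into a stack temporary with copy_from_user() and only the copied prev pointer is followed, so the kernel never dereferences a user pointer directly; a faulting copy simply ends the count. A hedged user-space analogue of that pattern, where a hypothetical fetch() plays the role of copy_from_user():

#include <stdio.h>
#include <string.h>

struct node {
	struct node *prev;      /* like acl_object_label->prev, points into "user" memory */
	int payload;
};

/* stand-in for copy_from_user(): returns 0 on success, nonzero on fault */
static int fetch(struct node *dst, const struct node *src)
{
	if (src == NULL)
		return 1;
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static unsigned int count_nodes(const struct node *userp)
{
	struct node tmp;
	unsigned int num = 0;

	while (userp) {
		if (fetch(&tmp, userp)) /* stop on a faulting copy, as the kernel code does */
			break;
		userp = tmp.prev;       /* follow the *copied* link only */
		num++;
	}
	return num;
}

int main(void)
{
	struct node a = { .prev = NULL, .payload = 1 };
	struct node b = { .prev = &a,   .payload = 2 };
	struct node c = { .prev = &b,   .payload = 3 };

	printf("list length: %u\n", count_nodes(&c));   /* prints 3 */
	return 0;
}
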
44702 +
44703 +static struct acl_subject_label *
44704 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44705 +
44706 +static int
44707 +copy_user_glob(struct acl_object_label *obj)
44708 +{
44709 + struct acl_object_label *g_tmp, **guser;
44710 + unsigned int len;
44711 + char *tmp;
44712 +
44713 + if (obj->globbed == NULL)
44714 + return 0;
44715 +
44716 + guser = &obj->globbed;
44717 + while (*guser) {
44718 + g_tmp = (struct acl_object_label *)
44719 + acl_alloc(sizeof (struct acl_object_label));
44720 + if (g_tmp == NULL)
44721 + return -ENOMEM;
44722 +
44723 + if (copy_from_user(g_tmp, *guser,
44724 + sizeof (struct acl_object_label)))
44725 + return -EFAULT;
44726 +
44727 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44728 +
44729 + if (!len || len >= PATH_MAX)
44730 + return -EINVAL;
44731 +
44732 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44733 + return -ENOMEM;
44734 +
44735 + if (copy_from_user(tmp, g_tmp->filename, len))
44736 + return -EFAULT;
44737 + tmp[len-1] = '\0';
44738 + g_tmp->filename = tmp;
44739 +
44740 + *guser = g_tmp;
44741 + guser = &(g_tmp->next);
44742 + }
44743 +
44744 + return 0;
44745 +}
44746 +
44747 +static int
44748 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44749 + struct acl_role_label *role)
44750 +{
44751 + struct acl_object_label *o_tmp;
44752 + unsigned int len;
44753 + int ret;
44754 + char *tmp;
44755 +
44756 + while (userp) {
44757 + if ((o_tmp = (struct acl_object_label *)
44758 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44759 + return -ENOMEM;
44760 +
44761 + if (copy_from_user(o_tmp, userp,
44762 + sizeof (struct acl_object_label)))
44763 + return -EFAULT;
44764 +
44765 + userp = o_tmp->prev;
44766 +
44767 + len = strnlen_user(o_tmp->filename, PATH_MAX);
44768 +
44769 + if (!len || len >= PATH_MAX)
44770 + return -EINVAL;
44771 +
44772 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44773 + return -ENOMEM;
44774 +
44775 + if (copy_from_user(tmp, o_tmp->filename, len))
44776 + return -EFAULT;
44777 + tmp[len-1] = '\0';
44778 + o_tmp->filename = tmp;
44779 +
44780 + insert_acl_obj_label(o_tmp, subj);
44781 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44782 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44783 + return -ENOMEM;
44784 +
44785 + ret = copy_user_glob(o_tmp);
44786 + if (ret)
44787 + return ret;
44788 +
44789 + if (o_tmp->nested) {
44790 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44791 + if (IS_ERR(o_tmp->nested))
44792 + return PTR_ERR(o_tmp->nested);
44793 +
44794 + /* insert into nested subject list */
44795 + o_tmp->nested->next = role->hash->first;
44796 + role->hash->first = o_tmp->nested;
44797 + }
44798 + }
44799 +
44800 + return 0;
44801 +}
44802 +
44803 +static __u32
44804 +count_user_subjs(struct acl_subject_label *userp)
44805 +{
44806 + struct acl_subject_label s_tmp;
44807 + __u32 num = 0;
44808 +
44809 + while (userp) {
44810 + if (copy_from_user(&s_tmp, userp,
44811 + sizeof (struct acl_subject_label)))
44812 + break;
44813 +
44814 + userp = s_tmp.prev;
44815 + /* do not count nested subjects against this count, since
44816 + they are not included in the hash table, but are
44817 + attached to objects. We have already counted
44818 + the subjects in userspace for the allocation
44819 + stack
44820 + */
44821 + if (!(s_tmp.mode & GR_NESTED))
44822 + num++;
44823 + }
44824 +
44825 + return num;
44826 +}
44827 +
44828 +static int
44829 +copy_user_allowedips(struct acl_role_label *rolep)
44830 +{
44831 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44832 +
44833 + ruserip = rolep->allowed_ips;
44834 +
44835 + while (ruserip) {
44836 + rlast = rtmp;
44837 +
44838 + if ((rtmp = (struct role_allowed_ip *)
44839 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44840 + return -ENOMEM;
44841 +
44842 + if (copy_from_user(rtmp, ruserip,
44843 + sizeof (struct role_allowed_ip)))
44844 + return -EFAULT;
44845 +
44846 + ruserip = rtmp->prev;
44847 +
44848 + if (!rlast) {
44849 + rtmp->prev = NULL;
44850 + rolep->allowed_ips = rtmp;
44851 + } else {
44852 + rlast->next = rtmp;
44853 + rtmp->prev = rlast;
44854 + }
44855 +
44856 + if (!ruserip)
44857 + rtmp->next = NULL;
44858 + }
44859 +
44860 + return 0;
44861 +}
44862 +
44863 +static int
44864 +copy_user_transitions(struct acl_role_label *rolep)
44865 +{
44866 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
44867 +
44868 + unsigned int len;
44869 + char *tmp;
44870 +
44871 + rusertp = rolep->transitions;
44872 +
44873 + while (rusertp) {
44874 + rlast = rtmp;
44875 +
44876 + if ((rtmp = (struct role_transition *)
44877 + acl_alloc(sizeof (struct role_transition))) == NULL)
44878 + return -ENOMEM;
44879 +
44880 + if (copy_from_user(rtmp, rusertp,
44881 + sizeof (struct role_transition)))
44882 + return -EFAULT;
44883 +
44884 + rusertp = rtmp->prev;
44885 +
44886 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44887 +
44888 + if (!len || len >= GR_SPROLE_LEN)
44889 + return -EINVAL;
44890 +
44891 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44892 + return -ENOMEM;
44893 +
44894 + if (copy_from_user(tmp, rtmp->rolename, len))
44895 + return -EFAULT;
44896 + tmp[len-1] = '\0';
44897 + rtmp->rolename = tmp;
44898 +
44899 + if (!rlast) {
44900 + rtmp->prev = NULL;
44901 + rolep->transitions = rtmp;
44902 + } else {
44903 + rlast->next = rtmp;
44904 + rtmp->prev = rlast;
44905 + }
44906 +
44907 + if (!rusertp)
44908 + rtmp->next = NULL;
44909 + }
44910 +
44911 + return 0;
44912 +}
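
[Editor's note] copy_user_allowedips() and copy_user_transitions() show the recurring string-import pattern in this loader: strnlen_user() bounds the user string (empty or over-long names are rejected), a buffer of exactly that length comes from the ACL allocator, copy_from_user() fills it, and the last byte is forced to '\0' so a hostile user string can never run unterminated. A simplified user-space sketch of the pattern, with a hypothetical bounded_len() and malloc standing in for strnlen_user() and acl_alloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 64         /* plays the role of GR_SPROLE_LEN / PATH_MAX */

/* stand-in for strnlen_user(): length including the trailing NUL, 0 on "fault" */
static size_t bounded_len(const char *user_str, size_t max)
{
	size_t n = strnlen(user_str, max);
	return n == max ? max : n + 1;
}

static char *import_string(const char *user_str)
{
	size_t len = bounded_len(user_str, NAME_MAX_LEN);
	char *tmp;

	if (len == 0 || len >= NAME_MAX_LEN)    /* reject empty / unterminated names */
		return NULL;

	tmp = malloc(len);                      /* acl_alloc() in the kernel code */
	if (tmp == NULL)
		return NULL;

	memcpy(tmp, user_str, len);             /* copy_from_user() in the kernel code */
	tmp[len - 1] = '\0';                    /* force termination regardless of input */
	return tmp;
}

int main(void)
{
	char *name = import_string("admin");

	printf("imported: %s\n", name ? name : "(rejected)");
	free(name);
	return 0;
}

The same bound/allocate/copy/terminate sequence appears for role names, subject filenames, object filenames and interface names throughout the loader below.
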
44913 +
44914 +static struct acl_subject_label *
44915 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
44916 +{
44917 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
44918 + unsigned int len;
44919 + char *tmp;
44920 + __u32 num_objs;
44921 + struct acl_ip_label **i_tmp, *i_utmp2;
44922 + struct gr_hash_struct ghash;
44923 + struct subject_map *subjmap;
44924 + unsigned int i_num;
44925 + int err;
44926 +
44927 + s_tmp = lookup_subject_map(userp);
44928 +
44929 + /* we've already copied this subject into the kernel, just return
44930 + the reference to it, and don't copy it over again
44931 + */
44932 + if (s_tmp)
44933 + return(s_tmp);
44934 +
44935 + if ((s_tmp = (struct acl_subject_label *)
44936 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
44937 + return ERR_PTR(-ENOMEM);
44938 +
44939 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
44940 + if (subjmap == NULL)
44941 + return ERR_PTR(-ENOMEM);
44942 +
44943 + subjmap->user = userp;
44944 + subjmap->kernel = s_tmp;
44945 + insert_subj_map_entry(subjmap);
44946 +
44947 + if (copy_from_user(s_tmp, userp,
44948 + sizeof (struct acl_subject_label)))
44949 + return ERR_PTR(-EFAULT);
44950 +
44951 + len = strnlen_user(s_tmp->filename, PATH_MAX);
44952 +
44953 + if (!len || len >= PATH_MAX)
44954 + return ERR_PTR(-EINVAL);
44955 +
44956 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44957 + return ERR_PTR(-ENOMEM);
44958 +
44959 + if (copy_from_user(tmp, s_tmp->filename, len))
44960 + return ERR_PTR(-EFAULT);
44961 + tmp[len-1] = '\0';
44962 + s_tmp->filename = tmp;
44963 +
44964 + if (!strcmp(s_tmp->filename, "/"))
44965 + role->root_label = s_tmp;
44966 +
44967 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
44968 + return ERR_PTR(-EFAULT);
44969 +
44970 + /* copy user and group transition tables */
44971 +
44972 + if (s_tmp->user_trans_num) {
44973 + uid_t *uidlist;
44974 +
44975 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
44976 + if (uidlist == NULL)
44977 + return ERR_PTR(-ENOMEM);
44978 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
44979 + return ERR_PTR(-EFAULT);
44980 +
44981 + s_tmp->user_transitions = uidlist;
44982 + }
44983 +
44984 + if (s_tmp->group_trans_num) {
44985 + gid_t *gidlist;
44986 +
44987 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
44988 + if (gidlist == NULL)
44989 + return ERR_PTR(-ENOMEM);
44990 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
44991 + return ERR_PTR(-EFAULT);
44992 +
44993 + s_tmp->group_transitions = gidlist;
44994 + }
44995 +
44996 + /* set up object hash table */
44997 + num_objs = count_user_objs(ghash.first);
44998 +
44999 + s_tmp->obj_hash_size = num_objs;
45000 + s_tmp->obj_hash =
45001 + (struct acl_object_label **)
45002 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45003 +
45004 + if (!s_tmp->obj_hash)
45005 + return ERR_PTR(-ENOMEM);
45006 +
45007 + memset(s_tmp->obj_hash, 0,
45008 + s_tmp->obj_hash_size *
45009 + sizeof (struct acl_object_label *));
45010 +
45011 + /* add in objects */
45012 + err = copy_user_objs(ghash.first, s_tmp, role);
45013 +
45014 + if (err)
45015 + return ERR_PTR(err);
45016 +
45017 + /* set pointer for parent subject */
45018 + if (s_tmp->parent_subject) {
45019 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45020 +
45021 + if (IS_ERR(s_tmp2))
45022 + return s_tmp2;
45023 +
45024 + s_tmp->parent_subject = s_tmp2;
45025 + }
45026 +
45027 + /* add in ip acls */
45028 +
45029 + if (!s_tmp->ip_num) {
45030 + s_tmp->ips = NULL;
45031 + goto insert;
45032 + }
45033 +
45034 + i_tmp =
45035 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45036 + sizeof (struct acl_ip_label *));
45037 +
45038 + if (!i_tmp)
45039 + return ERR_PTR(-ENOMEM);
45040 +
45041 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45042 + *(i_tmp + i_num) =
45043 + (struct acl_ip_label *)
45044 + acl_alloc(sizeof (struct acl_ip_label));
45045 + if (!*(i_tmp + i_num))
45046 + return ERR_PTR(-ENOMEM);
45047 +
45048 + if (copy_from_user
45049 + (&i_utmp2, s_tmp->ips + i_num,
45050 + sizeof (struct acl_ip_label *)))
45051 + return ERR_PTR(-EFAULT);
45052 +
45053 + if (copy_from_user
45054 + (*(i_tmp + i_num), i_utmp2,
45055 + sizeof (struct acl_ip_label)))
45056 + return ERR_PTR(-EFAULT);
45057 +
45058 + if ((*(i_tmp + i_num))->iface == NULL)
45059 + continue;
45060 +
45061 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45062 + if (!len || len >= IFNAMSIZ)
45063 + return ERR_PTR(-EINVAL);
45064 + tmp = acl_alloc(len);
45065 + if (tmp == NULL)
45066 + return ERR_PTR(-ENOMEM);
45067 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45068 + return ERR_PTR(-EFAULT);
45069 + (*(i_tmp + i_num))->iface = tmp;
45070 + }
45071 +
45072 + s_tmp->ips = i_tmp;
45073 +
45074 +insert:
45075 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45076 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45077 + return ERR_PTR(-ENOMEM);
45078 +
45079 + return s_tmp;
45080 +}
45081 +
45082 +static int
45083 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45084 +{
45085 + struct acl_subject_label s_pre;
45086 + struct acl_subject_label * ret;
45087 + int err;
45088 +
45089 + while (userp) {
45090 + if (copy_from_user(&s_pre, userp,
45091 + sizeof (struct acl_subject_label)))
45092 + return -EFAULT;
45093 +
45094 + /* do not add nested subjects here, add
45095 + while parsing objects
45096 + */
45097 +
45098 + if (s_pre.mode & GR_NESTED) {
45099 + userp = s_pre.prev;
45100 + continue;
45101 + }
45102 +
45103 + ret = do_copy_user_subj(userp, role);
45104 +
45105 + err = PTR_ERR(ret);
45106 + if (IS_ERR(ret))
45107 + return err;
45108 +
45109 + insert_acl_subj_label(ret, role);
45110 +
45111 + userp = s_pre.prev;
45112 + }
45113 +
45114 + return 0;
45115 +}
45116 +
45117 +static int
45118 +copy_user_acl(struct gr_arg *arg)
45119 +{
45120 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45121 + struct sprole_pw *sptmp;
45122 + struct gr_hash_struct *ghash;
45123 + uid_t *domainlist;
45124 + unsigned int r_num;
45125 + unsigned int len;
45126 + char *tmp;
45127 + int err = 0;
45128 + __u16 i;
45129 + __u32 num_subjs;
45130 +
45131 + /* we need a default and kernel role */
45132 + if (arg->role_db.num_roles < 2)
45133 + return -EINVAL;
45134 +
45135 + /* copy special role authentication info from userspace */
45136 +
45137 + num_sprole_pws = arg->num_sprole_pws;
45138 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45139 +
45140 + if (!acl_special_roles) {
45141 + err = -ENOMEM;
45142 + goto cleanup;
45143 + }
45144 +
45145 + for (i = 0; i < num_sprole_pws; i++) {
45146 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45147 + if (!sptmp) {
45148 + err = -ENOMEM;
45149 + goto cleanup;
45150 + }
45151 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45152 + sizeof (struct sprole_pw))) {
45153 + err = -EFAULT;
45154 + goto cleanup;
45155 + }
45156 +
45157 + len =
45158 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45159 +
45160 + if (!len || len >= GR_SPROLE_LEN) {
45161 + err = -EINVAL;
45162 + goto cleanup;
45163 + }
45164 +
45165 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45166 + err = -ENOMEM;
45167 + goto cleanup;
45168 + }
45169 +
45170 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45171 + err = -EFAULT;
45172 + goto cleanup;
45173 + }
45174 + tmp[len-1] = '\0';
45175 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45176 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45177 +#endif
45178 + sptmp->rolename = tmp;
45179 + acl_special_roles[i] = sptmp;
45180 + }
45181 +
45182 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45183 +
45184 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45185 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45186 +
45187 + if (!r_tmp) {
45188 + err = -ENOMEM;
45189 + goto cleanup;
45190 + }
45191 +
45192 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45193 + sizeof (struct acl_role_label *))) {
45194 + err = -EFAULT;
45195 + goto cleanup;
45196 + }
45197 +
45198 + if (copy_from_user(r_tmp, r_utmp2,
45199 + sizeof (struct acl_role_label))) {
45200 + err = -EFAULT;
45201 + goto cleanup;
45202 + }
45203 +
45204 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45205 +
45206 + if (!len || len >= PATH_MAX) {
45207 + err = -EINVAL;
45208 + goto cleanup;
45209 + }
45210 +
45211 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45212 + err = -ENOMEM;
45213 + goto cleanup;
45214 + }
45215 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45216 + err = -EFAULT;
45217 + goto cleanup;
45218 + }
45219 + tmp[len-1] = '\0';
45220 + r_tmp->rolename = tmp;
45221 +
45222 + if (!strcmp(r_tmp->rolename, "default")
45223 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45224 + default_role = r_tmp;
45225 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45226 + kernel_role = r_tmp;
45227 + }
45228 +
45229 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45230 + err = -ENOMEM;
45231 + goto cleanup;
45232 + }
45233 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45234 + err = -EFAULT;
45235 + goto cleanup;
45236 + }
45237 +
45238 + r_tmp->hash = ghash;
45239 +
45240 + num_subjs = count_user_subjs(r_tmp->hash->first);
45241 +
45242 + r_tmp->subj_hash_size = num_subjs;
45243 + r_tmp->subj_hash =
45244 + (struct acl_subject_label **)
45245 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45246 +
45247 + if (!r_tmp->subj_hash) {
45248 + err = -ENOMEM;
45249 + goto cleanup;
45250 + }
45251 +
45252 + err = copy_user_allowedips(r_tmp);
45253 + if (err)
45254 + goto cleanup;
45255 +
45256 + /* copy domain info */
45257 + if (r_tmp->domain_children != NULL) {
45258 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45259 + if (domainlist == NULL) {
45260 + err = -ENOMEM;
45261 + goto cleanup;
45262 + }
45263 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45264 + err = -EFAULT;
45265 + goto cleanup;
45266 + }
45267 + r_tmp->domain_children = domainlist;
45268 + }
45269 +
45270 + err = copy_user_transitions(r_tmp);
45271 + if (err)
45272 + goto cleanup;
45273 +
45274 + memset(r_tmp->subj_hash, 0,
45275 + r_tmp->subj_hash_size *
45276 + sizeof (struct acl_subject_label *));
45277 +
45278 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45279 +
45280 + if (err)
45281 + goto cleanup;
45282 +
45283 + /* set nested subject list to null */
45284 + r_tmp->hash->first = NULL;
45285 +
45286 + insert_acl_role_label(r_tmp);
45287 + }
45288 +
45289 + goto return_err;
45290 + cleanup:
45291 + free_variables();
45292 + return_err:
45293 + return err;
45294 +
45295 +}
45296 +
45297 +static int
45298 +gracl_init(struct gr_arg *args)
45299 +{
45300 + int error = 0;
45301 +
45302 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45303 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45304 +
45305 + if (init_variables(args)) {
45306 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45307 + error = -ENOMEM;
45308 + free_variables();
45309 + goto out;
45310 + }
45311 +
45312 + error = copy_user_acl(args);
45313 + free_init_variables();
45314 + if (error) {
45315 + free_variables();
45316 + goto out;
45317 + }
45318 +
45319 + if ((error = gr_set_acls(0))) {
45320 + free_variables();
45321 + goto out;
45322 + }
45323 +
45324 + pax_open_kernel();
45325 + gr_status |= GR_READY;
45326 + pax_close_kernel();
45327 +
45328 + out:
45329 + return error;
45330 +}
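
[Editor's note] gracl_init() stitches the loader together: it copies the supplied salt and checksum into kernel globals, builds the hash tables (init_variables), pulls the policy out of user space (copy_user_acl), drops the temporary user->kernel subject map (free_init_variables), applies the loaded ACLs to every task (gr_set_acls), and only then sets GR_READY; every failure path unwinds with free_variables(). A hedged sketch of that "unwind on any failure" shape, with stub functions rather than the real steps:

#include <stdio.h>

static int  build_tables(void)      { return 0; }       /* init_variables()      */
static int  copy_policy(void)       { return 0; }       /* copy_user_acl()       */
static void drop_loader_state(void) { }                 /* free_init_variables() */
static int  apply_policy(void)      { return 0; }       /* gr_set_acls(0)        */
static void free_everything(void)   { puts("unwound partial state"); }

static int loader_init(void)
{
	int err;

	if (build_tables()) {
		free_everything();
		return -1;
	}

	err = copy_policy();
	drop_loader_state();    /* the user->kernel subject map is only needed while copying */
	if (err) {
		free_everything();
		return err;
	}

	err = apply_policy();
	if (err) {
		free_everything();
		return err;
	}

	puts("RBAC enabled");   /* gr_status |= GR_READY in the patch */
	return 0;
}

int main(void)
{
	return loader_init();
}
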
45331 +
45332 +/* derived from glibc fnmatch() 0: match, 1: no match*/
45333 +
45334 +static int
45335 +glob_match(const char *p, const char *n)
45336 +{
45337 + char c;
45338 +
45339 + while ((c = *p++) != '\0') {
45340 + switch (c) {
45341 + case '?':
45342 + if (*n == '\0')
45343 + return 1;
45344 + else if (*n == '/')
45345 + return 1;
45346 + break;
45347 + case '\\':
45348 + if (*n != c)
45349 + return 1;
45350 + break;
45351 + case '*':
45352 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45353 + if (*n == '/')
45354 + return 1;
45355 + else if (c == '?') {
45356 + if (*n == '\0')
45357 + return 1;
45358 + else
45359 + ++n;
45360 + }
45361 + }
45362 + if (c == '\0') {
45363 + return 0;
45364 + } else {
45365 + const char *endp;
45366 +
45367 + if ((endp = strchr(n, '/')) == NULL)
45368 + endp = n + strlen(n);
45369 +
45370 + if (c == '[') {
45371 + for (--p; n < endp; ++n)
45372 + if (!glob_match(p, n))
45373 + return 0;
45374 + } else if (c == '/') {
45375 + while (*n != '\0' && *n != '/')
45376 + ++n;
45377 + if (*n == '/' && !glob_match(p, n + 1))
45378 + return 0;
45379 + } else {
45380 + for (--p; n < endp; ++n)
45381 + if (*n == c && !glob_match(p, n))
45382 + return 0;
45383 + }
45384 +
45385 + return 1;
45386 + }
45387 + case '[':
45388 + {
45389 + int not;
45390 + char cold;
45391 +
45392 + if (*n == '\0' || *n == '/')
45393 + return 1;
45394 +
45395 + not = (*p == '!' || *p == '^');
45396 + if (not)
45397 + ++p;
45398 +
45399 + c = *p++;
45400 + for (;;) {
45401 + unsigned char fn = (unsigned char)*n;
45402 +
45403 + if (c == '\0')
45404 + return 1;
45405 + else {
45406 + if (c == fn)
45407 + goto matched;
45408 + cold = c;
45409 + c = *p++;
45410 +
45411 + if (c == '-' && *p != ']') {
45412 + unsigned char cend = *p++;
45413 +
45414 + if (cend == '\0')
45415 + return 1;
45416 +
45417 + if (cold <= fn && fn <= cend)
45418 + goto matched;
45419 +
45420 + c = *p++;
45421 + }
45422 + }
45423 +
45424 + if (c == ']')
45425 + break;
45426 + }
45427 + if (!not)
45428 + return 1;
45429 + break;
45430 + matched:
45431 + while (c != ']') {
45432 + if (c == '\0')
45433 + return 1;
45434 +
45435 + c = *p++;
45436 + }
45437 + if (not)
45438 + return 1;
45439 + }
45440 + break;
45441 + default:
45442 + if (c != *n)
45443 + return 1;
45444 + }
45445 +
45446 + ++n;
45447 + }
45448 +
45449 + if (*n == '\0')
45450 + return 0;
45451 +
45452 + if (*n == '/')
45453 + return 0;
45454 +
45455 + return 1;
45456 +}
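
[Editor's note] glob_match() above follows the glibc fnmatch() convention named in its comment: it returns 0 on a match and 1 on a mismatch, treats '/' as a boundary that '*' and '?' never cross, and supports '[...]' classes with '!' or '^' negation. A small usage sketch showing how callers such as chk_glob_label() interpret that return value; fnmatch() from <fnmatch.h> is used here purely as a reference for the semantics, it is not the kernel routine:

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* FNM_PATHNAME makes '*' stop at '/', mirroring glob_match()'s behaviour */
	const char *pattern = "/home/*/.ssh/authorized_keys";
	const char *paths[] = {
		"/home/alice/.ssh/authorized_keys",             /* matches            */
		"/home/alice/work/.ssh/authorized_keys",        /* '*' cannot cross '/' */
	};

	for (int i = 0; i < 2; i++) {
		int rc = fnmatch(pattern, paths[i], FNM_PATHNAME);
		/* like glob_match(): 0 means "use this globbed object label" */
		printf("%-45s -> %s\n", paths[i], rc == 0 ? "match" : "no match");
	}
	return 0;
}
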
45457 +
45458 +static struct acl_object_label *
45459 +chk_glob_label(struct acl_object_label *globbed,
45460 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45461 +{
45462 + struct acl_object_label *tmp;
45463 +
45464 + if (*path == NULL)
45465 + *path = gr_to_filename_nolock(dentry, mnt);
45466 +
45467 + tmp = globbed;
45468 +
45469 + while (tmp) {
45470 + if (!glob_match(tmp->filename, *path))
45471 + return tmp;
45472 + tmp = tmp->next;
45473 + }
45474 +
45475 + return NULL;
45476 +}
45477 +
45478 +static struct acl_object_label *
45479 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45480 + const ino_t curr_ino, const dev_t curr_dev,
45481 + const struct acl_subject_label *subj, char **path, const int checkglob)
45482 +{
45483 + struct acl_subject_label *tmpsubj;
45484 + struct acl_object_label *retval;
45485 + struct acl_object_label *retval2;
45486 +
45487 + tmpsubj = (struct acl_subject_label *) subj;
45488 + read_lock(&gr_inode_lock);
45489 + do {
45490 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45491 + if (retval) {
45492 + if (checkglob && retval->globbed) {
45493 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45494 + (struct vfsmount *)orig_mnt, path);
45495 + if (retval2)
45496 + retval = retval2;
45497 + }
45498 + break;
45499 + }
45500 + } while ((tmpsubj = tmpsubj->parent_subject));
45501 + read_unlock(&gr_inode_lock);
45502 +
45503 + return retval;
45504 +}
45505 +
45506 +static __inline__ struct acl_object_label *
45507 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45508 + const struct dentry *curr_dentry,
45509 + const struct acl_subject_label *subj, char **path, const int checkglob)
45510 +{
45511 + int newglob = checkglob;
45512 +
45513 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
45514 + as we don't want a / * rule to match instead of the / object
45515 + don't do this for create lookups that call this function though, since they're looking up
45516 + on the parent and thus need globbing checks on all paths
45517 + */
45518 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45519 + newglob = GR_NO_GLOB;
45520 +
45521 + return __full_lookup(orig_dentry, orig_mnt,
45522 + curr_dentry->d_inode->i_ino,
45523 + __get_dev(curr_dentry), subj, path, newglob);
45524 +}
45525 +
45526 +static struct acl_object_label *
45527 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45528 + const struct acl_subject_label *subj, char *path, const int checkglob)
45529 +{
45530 + struct dentry *dentry = (struct dentry *) l_dentry;
45531 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45532 + struct acl_object_label *retval;
45533 +
45534 + spin_lock(&dcache_lock);
45535 + spin_lock(&vfsmount_lock);
45536 +
45537 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45538 +#ifdef CONFIG_NET
45539 + mnt == sock_mnt ||
45540 +#endif
45541 +#ifdef CONFIG_HUGETLBFS
45542 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45543 +#endif
45544 + /* ignore Eric Biederman */
45545 + IS_PRIVATE(l_dentry->d_inode))) {
45546 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45547 + goto out;
45548 + }
45549 +
45550 + for (;;) {
45551 + if (dentry == real_root && mnt == real_root_mnt)
45552 + break;
45553 +
45554 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45555 + if (mnt->mnt_parent == mnt)
45556 + break;
45557 +
45558 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45559 + if (retval != NULL)
45560 + goto out;
45561 +
45562 + dentry = mnt->mnt_mountpoint;
45563 + mnt = mnt->mnt_parent;
45564 + continue;
45565 + }
45566 +
45567 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45568 + if (retval != NULL)
45569 + goto out;
45570 +
45571 + dentry = dentry->d_parent;
45572 + }
45573 +
45574 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45575 +
45576 + if (retval == NULL)
45577 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45578 +out:
45579 + spin_unlock(&vfsmount_lock);
45580 + spin_unlock(&dcache_lock);
45581 +
45582 + BUG_ON(retval == NULL);
45583 +
45584 + return retval;
45585 +}
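
[Editor's note] __chk_obj_label() resolves the most specific object label for a path by walking the dcache upward under dcache_lock/vfsmount_lock: special pseudo filesystems (shm, pipefs, sockfs, hugetlbfs) get a fake read/write object, otherwise each ancestor dentry is looked up via full_lookup(), crossing mount points through mnt_mountpoint/mnt_parent, until a label matches or the walk reaches the policy's real root, whose object is tried as the last resort (the BUG_ON() assumes the policy always provides one). A greatly simplified sketch of that "walk up until something matches" idea over plain strings rather than dentries, with a hypothetical prefix policy:

#include <stdio.h>
#include <string.h>

/* hypothetical policy: the closest configured ancestor wins, "/" always exists */
static const char *policy[] = { "/", "/home", "/home/alice/.ssh" };

static const char *label_for(const char *path)
{
	char buf[256];

	snprintf(buf, sizeof(buf), "%s", path);

	for (;;) {
		/* does the current ancestor have an object label? */
		for (size_t i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
			if (strcmp(buf, policy[i]) == 0)
				return policy[i];

		/* walk one component up, like dentry = dentry->d_parent */
		char *slash = strrchr(buf, '/');
		if (slash == buf)               /* reached "/" -> fallback label */
			return policy[0];
		*slash = '\0';
	}
}

int main(void)
{
	printf("%s\n", label_for("/home/alice/.ssh/id_rsa"));   /* /home/alice/.ssh */
	printf("%s\n", label_for("/etc/passwd"));               /* /                */
	return 0;
}
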
45586 +
45587 +static __inline__ struct acl_object_label *
45588 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45589 + const struct acl_subject_label *subj)
45590 +{
45591 + char *path = NULL;
45592 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45593 +}
45594 +
45595 +static __inline__ struct acl_object_label *
45596 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45597 + const struct acl_subject_label *subj)
45598 +{
45599 + char *path = NULL;
45600 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45601 +}
45602 +
45603 +static __inline__ struct acl_object_label *
45604 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45605 + const struct acl_subject_label *subj, char *path)
45606 +{
45607 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45608 +}
45609 +
45610 +static struct acl_subject_label *
45611 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45612 + const struct acl_role_label *role)
45613 +{
45614 + struct dentry *dentry = (struct dentry *) l_dentry;
45615 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45616 + struct acl_subject_label *retval;
45617 +
45618 + spin_lock(&dcache_lock);
45619 + spin_lock(&vfsmount_lock);
45620 +
45621 + for (;;) {
45622 + if (dentry == real_root && mnt == real_root_mnt)
45623 + break;
45624 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45625 + if (mnt->mnt_parent == mnt)
45626 + break;
45627 +
45628 + read_lock(&gr_inode_lock);
45629 + retval =
45630 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45631 + __get_dev(dentry), role);
45632 + read_unlock(&gr_inode_lock);
45633 + if (retval != NULL)
45634 + goto out;
45635 +
45636 + dentry = mnt->mnt_mountpoint;
45637 + mnt = mnt->mnt_parent;
45638 + continue;
45639 + }
45640 +
45641 + read_lock(&gr_inode_lock);
45642 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45643 + __get_dev(dentry), role);
45644 + read_unlock(&gr_inode_lock);
45645 + if (retval != NULL)
45646 + goto out;
45647 +
45648 + dentry = dentry->d_parent;
45649 + }
45650 +
45651 + read_lock(&gr_inode_lock);
45652 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45653 + __get_dev(dentry), role);
45654 + read_unlock(&gr_inode_lock);
45655 +
45656 + if (unlikely(retval == NULL)) {
45657 + read_lock(&gr_inode_lock);
45658 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45659 + __get_dev(real_root), role);
45660 + read_unlock(&gr_inode_lock);
45661 + }
45662 +out:
45663 + spin_unlock(&vfsmount_lock);
45664 + spin_unlock(&dcache_lock);
45665 +
45666 + BUG_ON(retval == NULL);
45667 +
45668 + return retval;
45669 +}
45670 +
45671 +static void
45672 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45673 +{
45674 + struct task_struct *task = current;
45675 + const struct cred *cred = current_cred();
45676 +
45677 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45678 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45679 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45680 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45681 +
45682 + return;
45683 +}
45684 +
45685 +static void
45686 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45687 +{
45688 + struct task_struct *task = current;
45689 + const struct cred *cred = current_cred();
45690 +
45691 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45692 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45693 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45694 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45695 +
45696 + return;
45697 +}
45698 +
45699 +static void
45700 +gr_log_learn_id_change(const char type, const unsigned int real,
45701 + const unsigned int effective, const unsigned int fs)
45702 +{
45703 + struct task_struct *task = current;
45704 + const struct cred *cred = current_cred();
45705 +
45706 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45707 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45708 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45709 + type, real, effective, fs, &task->signal->saved_ip);
45710 +
45711 + return;
45712 +}
45713 +
45714 +__u32
45715 +gr_check_link(const struct dentry * new_dentry,
45716 + const struct dentry * parent_dentry,
45717 + const struct vfsmount * parent_mnt,
45718 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45719 +{
45720 + struct acl_object_label *obj;
45721 + __u32 oldmode, newmode;
45722 + __u32 needmode;
45723 +
45724 + if (unlikely(!(gr_status & GR_READY)))
45725 + return (GR_CREATE | GR_LINK);
45726 +
45727 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45728 + oldmode = obj->mode;
45729 +
45730 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45731 + oldmode |= (GR_CREATE | GR_LINK);
45732 +
45733 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45734 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45735 + needmode |= GR_SETID | GR_AUDIT_SETID;
45736 +
45737 + newmode =
45738 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45739 + oldmode | needmode);
45740 +
45741 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45742 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45743 + GR_INHERIT | GR_AUDIT_INHERIT);
45744 +
45745 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45746 + goto bad;
45747 +
45748 + if ((oldmode & needmode) != needmode)
45749 + goto bad;
45750 +
45751 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45752 + if ((newmode & needmode) != needmode)
45753 + goto bad;
45754 +
45755 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45756 + return newmode;
45757 +bad:
45758 + needmode = oldmode;
45759 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45760 + needmode |= GR_SETID;
45761 +
45762 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45763 + gr_log_learn(old_dentry, old_mnt, needmode);
45764 + return (GR_CREATE | GR_LINK);
45765 + } else if (newmode & GR_SUPPRESS)
45766 + return GR_SUPPRESS;
45767 + else
45768 + return 0;
45769 +}
45770 +
45771 +__u32
45772 +gr_search_file(const struct dentry * dentry, const __u32 mode,
45773 + const struct vfsmount * mnt)
45774 +{
45775 + __u32 retval = mode;
45776 + struct acl_subject_label *curracl;
45777 + struct acl_object_label *currobj;
45778 +
45779 + if (unlikely(!(gr_status & GR_READY)))
45780 + return (mode & ~GR_AUDITS);
45781 +
45782 + curracl = current->acl;
45783 +
45784 + currobj = chk_obj_label(dentry, mnt, curracl);
45785 + retval = currobj->mode & mode;
45786 +
45787 + /* if we're opening a specified transfer file for writing
45788 + (e.g. /dev/initctl), then transfer our role to init
45789 + */
45790 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45791 + current->role->roletype & GR_ROLE_PERSIST)) {
45792 + struct task_struct *task = init_pid_ns.child_reaper;
45793 +
45794 + if (task->role != current->role) {
45795 + task->acl_sp_role = 0;
45796 + task->acl_role_id = current->acl_role_id;
45797 + task->role = current->role;
45798 + rcu_read_lock();
45799 + read_lock(&grsec_exec_file_lock);
45800 + gr_apply_subject_to_task(task);
45801 + read_unlock(&grsec_exec_file_lock);
45802 + rcu_read_unlock();
45803 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45804 + }
45805 + }
45806 +
45807 + if (unlikely
45808 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45809 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45810 + __u32 new_mode = mode;
45811 +
45812 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45813 +
45814 + retval = new_mode;
45815 +
45816 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45817 + new_mode |= GR_INHERIT;
45818 +
45819 + if (!(mode & GR_NOLEARN))
45820 + gr_log_learn(dentry, mnt, new_mode);
45821 + }
45822 +
45823 + return retval;
45824 +}
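
[Editor's note] gr_search_file() reduces an access decision to bit arithmetic: the requested mode is ANDed with the matched object label's mode, so the caller gets back exactly the subset of requested rights the policy grants; when RBAC is not yet enabled the request is simply granted with the audit bits stripped, and learning subjects are granted the request and have it logged instead of denied. A tiny sketch of that mask-intersection check with hypothetical permission bits (not the GR_* flags):

#include <stdio.h>

#define P_READ  0x1u
#define P_WRITE 0x2u
#define P_EXEC  0x4u

/* grant the intersection of what was asked for and what the label allows */
static unsigned int search_file(unsigned int requested, unsigned int label_mode)
{
	return requested & label_mode;
}

int main(void)
{
	unsigned int label = P_READ | P_EXEC;   /* e.g. an "rx" object for a binary */
	unsigned int want  = P_READ | P_WRITE;

	unsigned int got = search_file(want, label);
	printf("granted read:  %s\n", got & P_READ  ? "yes" : "no");
	printf("granted write: %s\n", got & P_WRITE ? "yes" : "no");
	/* callers then compare the returned mask against the request to allow or deny */
	return 0;
}
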
45825 +
45826 +__u32
45827 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45828 + const struct vfsmount * mnt, const __u32 mode)
45829 +{
45830 + struct name_entry *match;
45831 + struct acl_object_label *matchpo;
45832 + struct acl_subject_label *curracl;
45833 + char *path;
45834 + __u32 retval;
45835 +
45836 + if (unlikely(!(gr_status & GR_READY)))
45837 + return (mode & ~GR_AUDITS);
45838 +
45839 + preempt_disable();
45840 + path = gr_to_filename_rbac(new_dentry, mnt);
45841 + match = lookup_name_entry_create(path);
45842 +
45843 + if (!match)
45844 + goto check_parent;
45845 +
45846 + curracl = current->acl;
45847 +
45848 + read_lock(&gr_inode_lock);
45849 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45850 + read_unlock(&gr_inode_lock);
45851 +
45852 + if (matchpo) {
45853 + if ((matchpo->mode & mode) !=
45854 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
45855 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45856 + __u32 new_mode = mode;
45857 +
45858 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45859 +
45860 + gr_log_learn(new_dentry, mnt, new_mode);
45861 +
45862 + preempt_enable();
45863 + return new_mode;
45864 + }
45865 + preempt_enable();
45866 + return (matchpo->mode & mode);
45867 + }
45868 +
45869 + check_parent:
45870 + curracl = current->acl;
45871 +
45872 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45873 + retval = matchpo->mode & mode;
45874 +
45875 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45876 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45877 + __u32 new_mode = mode;
45878 +
45879 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45880 +
45881 + gr_log_learn(new_dentry, mnt, new_mode);
45882 + preempt_enable();
45883 + return new_mode;
45884 + }
45885 +
45886 + preempt_enable();
45887 + return retval;
45888 +}
45889 +
45890 +int
45891 +gr_check_hidden_task(const struct task_struct *task)
45892 +{
45893 + if (unlikely(!(gr_status & GR_READY)))
45894 + return 0;
45895 +
45896 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
45897 + return 1;
45898 +
45899 + return 0;
45900 +}
45901 +
45902 +int
45903 +gr_check_protected_task(const struct task_struct *task)
45904 +{
45905 + if (unlikely(!(gr_status & GR_READY) || !task))
45906 + return 0;
45907 +
45908 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45909 + task->acl != current->acl)
45910 + return 1;
45911 +
45912 + return 0;
45913 +}
45914 +
45915 +int
45916 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
45917 +{
45918 + struct task_struct *p;
45919 + int ret = 0;
45920 +
45921 + if (unlikely(!(gr_status & GR_READY) || !pid))
45922 + return ret;
45923 +
45924 + read_lock(&tasklist_lock);
45925 + do_each_pid_task(pid, type, p) {
45926 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45927 + p->acl != current->acl) {
45928 + ret = 1;
45929 + goto out;
45930 + }
45931 + } while_each_pid_task(pid, type, p);
45932 +out:
45933 + read_unlock(&tasklist_lock);
45934 +
45935 + return ret;
45936 +}
45937 +
45938 +void
45939 +gr_copy_label(struct task_struct *tsk)
45940 +{
45941 + tsk->signal->used_accept = 0;
45942 + tsk->acl_sp_role = 0;
45943 + tsk->acl_role_id = current->acl_role_id;
45944 + tsk->acl = current->acl;
45945 + tsk->role = current->role;
45946 + tsk->signal->curr_ip = current->signal->curr_ip;
45947 + tsk->signal->saved_ip = current->signal->saved_ip;
45948 + if (current->exec_file)
45949 + get_file(current->exec_file);
45950 + tsk->exec_file = current->exec_file;
45951 + tsk->is_writable = current->is_writable;
45952 + if (unlikely(current->signal->used_accept)) {
45953 + current->signal->curr_ip = 0;
45954 + current->signal->saved_ip = 0;
45955 + }
45956 +
45957 + return;
45958 +}
45959 +
45960 +static void
45961 +gr_set_proc_res(struct task_struct *task)
45962 +{
45963 + struct acl_subject_label *proc;
45964 + unsigned short i;
45965 +
45966 + proc = task->acl;
45967 +
45968 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
45969 + return;
45970 +
45971 + for (i = 0; i < RLIM_NLIMITS; i++) {
45972 + if (!(proc->resmask & (1 << i)))
45973 + continue;
45974 +
45975 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
45976 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
45977 + }
45978 +
45979 + return;
45980 +}
45981 +
45982 +extern int __gr_process_user_ban(struct user_struct *user);
45983 +
45984 +int
45985 +gr_check_user_change(int real, int effective, int fs)
45986 +{
45987 + unsigned int i;
45988 + __u16 num;
45989 + uid_t *uidlist;
45990 + int curuid;
45991 + int realok = 0;
45992 + int effectiveok = 0;
45993 + int fsok = 0;
45994 +
45995 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
45996 + struct user_struct *user;
45997 +
45998 + if (real == -1)
45999 + goto skipit;
46000 +
46001 + user = find_user(real);
46002 + if (user == NULL)
46003 + goto skipit;
46004 +
46005 + if (__gr_process_user_ban(user)) {
46006 + /* for find_user */
46007 + free_uid(user);
46008 + return 1;
46009 + }
46010 +
46011 + /* for find_user */
46012 + free_uid(user);
46013 +
46014 +skipit:
46015 +#endif
46016 +
46017 + if (unlikely(!(gr_status & GR_READY)))
46018 + return 0;
46019 +
46020 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46021 + gr_log_learn_id_change('u', real, effective, fs);
46022 +
46023 + num = current->acl->user_trans_num;
46024 + uidlist = current->acl->user_transitions;
46025 +
46026 + if (uidlist == NULL)
46027 + return 0;
46028 +
46029 + if (real == -1)
46030 + realok = 1;
46031 + if (effective == -1)
46032 + effectiveok = 1;
46033 + if (fs == -1)
46034 + fsok = 1;
46035 +
46036 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46037 + for (i = 0; i < num; i++) {
46038 + curuid = (int)uidlist[i];
46039 + if (real == curuid)
46040 + realok = 1;
46041 + if (effective == curuid)
46042 + effectiveok = 1;
46043 + if (fs == curuid)
46044 + fsok = 1;
46045 + }
46046 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46047 + for (i = 0; i < num; i++) {
46048 + curuid = (int)uidlist[i];
46049 + if (real == curuid)
46050 + break;
46051 + if (effective == curuid)
46052 + break;
46053 + if (fs == curuid)
46054 + break;
46055 + }
46056 + /* not in deny list */
46057 + if (i == num) {
46058 + realok = 1;
46059 + effectiveok = 1;
46060 + fsok = 1;
46061 + }
46062 + }
46063 +
46064 + if (realok && effectiveok && fsok)
46065 + return 0;
46066 + else {
46067 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46068 + return 1;
46069 + }
46070 +}
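
[Editor's note] gr_check_user_change() enforces the subject's user transition list: under GR_ID_ALLOW each of the real/effective/fs ids being changed must appear in the list (an id of -1 means "not being changed" and is always acceptable), while under GR_ID_DENY a single hit on any of them rejects the change; gr_check_group_change() below is the same logic over gids. A compact user-space sketch of that allow/deny decision, with hypothetical constants in place of the grsecurity flags and a slightly simplified per-id deny check:

#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };  /* stand-ins for GR_ID_ALLOW / GR_ID_DENY */

/* return 0 if the (real, effective, fs) change is permitted, 1 if denied */
static int check_id_change(enum trans_type type, const int *list, unsigned int num,
			   int real, int effective, int fs)
{
	const int ids[3] = { real, effective, fs };
	int ok[3];

	for (int j = 0; j < 3; j++) {
		ok[j] = (ids[j] == -1);         /* -1: this id is not being changed */
		for (unsigned int i = 0; i < num && !ok[j]; i++) {
			if (type == ID_ALLOW && ids[j] == list[i])
				ok[j] = 1;      /* explicitly allowed */
			if (type == ID_DENY && ids[j] == list[i])
				return 1;       /* explicitly denied */
		}
		if (type == ID_DENY)
			ok[j] = 1;              /* not on the deny list */
	}
	return (ok[0] && ok[1] && ok[2]) ? 0 : 1;
}

int main(void)
{
	int allowed[] = { 1000, 33 };

	printf("%d\n", check_id_change(ID_ALLOW, allowed, 2, 1000, 1000, -1)); /* 0: permitted */
	printf("%d\n", check_id_change(ID_ALLOW, allowed, 2, 0, 1000, -1));    /* 1: uid 0 not listed */
	printf("%d\n", check_id_change(ID_DENY,  allowed, 2, 0, 0, 0));        /* 0: not denied */
	printf("%d\n", check_id_change(ID_DENY,  allowed, 2, 33, 0, 0));       /* 1: 33 is denied */
	return 0;
}
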
46071 +
46072 +int
46073 +gr_check_group_change(int real, int effective, int fs)
46074 +{
46075 + unsigned int i;
46076 + __u16 num;
46077 + gid_t *gidlist;
46078 + int curgid;
46079 + int realok = 0;
46080 + int effectiveok = 0;
46081 + int fsok = 0;
46082 +
46083 + if (unlikely(!(gr_status & GR_READY)))
46084 + return 0;
46085 +
46086 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46087 + gr_log_learn_id_change('g', real, effective, fs);
46088 +
46089 + num = current->acl->group_trans_num;
46090 + gidlist = current->acl->group_transitions;
46091 +
46092 + if (gidlist == NULL)
46093 + return 0;
46094 +
46095 + if (real == -1)
46096 + realok = 1;
46097 + if (effective == -1)
46098 + effectiveok = 1;
46099 + if (fs == -1)
46100 + fsok = 1;
46101 +
46102 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46103 + for (i = 0; i < num; i++) {
46104 + curgid = (int)gidlist[i];
46105 + if (real == curgid)
46106 + realok = 1;
46107 + if (effective == curgid)
46108 + effectiveok = 1;
46109 + if (fs == curgid)
46110 + fsok = 1;
46111 + }
46112 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46113 + for (i = 0; i < num; i++) {
46114 + curgid = (int)gidlist[i];
46115 + if (real == curgid)
46116 + break;
46117 + if (effective == curgid)
46118 + break;
46119 + if (fs == curgid)
46120 + break;
46121 + }
46122 + /* not in deny list */
46123 + if (i == num) {
46124 + realok = 1;
46125 + effectiveok = 1;
46126 + fsok = 1;
46127 + }
46128 + }
46129 +
46130 + if (realok && effectiveok && fsok)
46131 + return 0;
46132 + else {
46133 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46134 + return 1;
46135 + }
46136 +}
46137 +
46138 +void
46139 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46140 +{
46141 + struct acl_role_label *role = task->role;
46142 + struct acl_subject_label *subj = NULL;
46143 + struct acl_object_label *obj;
46144 + struct file *filp;
46145 +
46146 + if (unlikely(!(gr_status & GR_READY)))
46147 + return;
46148 +
46149 + filp = task->exec_file;
46150 +
46151 + /* kernel process, we'll give them the kernel role */
46152 + if (unlikely(!filp)) {
46153 + task->role = kernel_role;
46154 + task->acl = kernel_role->root_label;
46155 + return;
46156 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46157 + role = lookup_acl_role_label(task, uid, gid);
46158 +
46159 + /* perform subject lookup in possibly new role
46160 + we can use this result below in the case where role == task->role
46161 + */
46162 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46163 +
46164 + /* if we changed uid/gid, but result in the same role
46165 + and are using inheritance, don't lose the inherited subject
46166 + if current subject is other than what normal lookup
46167 + would result in, we arrived via inheritance, don't
46168 + lose subject
46169 + */
46170 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46171 + (subj == task->acl)))
46172 + task->acl = subj;
46173 +
46174 + task->role = role;
46175 +
46176 + task->is_writable = 0;
46177 +
46178 + /* ignore additional mmap checks for processes that are writable
46179 + by the default ACL */
46180 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46181 + if (unlikely(obj->mode & GR_WRITE))
46182 + task->is_writable = 1;
46183 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46184 + if (unlikely(obj->mode & GR_WRITE))
46185 + task->is_writable = 1;
46186 +
46187 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46188 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46189 +#endif
46190 +
46191 + gr_set_proc_res(task);
46192 +
46193 + return;
46194 +}
46195 +
46196 +int
46197 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46198 + const int unsafe_share)
46199 +{
46200 + struct task_struct *task = current;
46201 + struct acl_subject_label *newacl;
46202 + struct acl_object_label *obj;
46203 + __u32 retmode;
46204 +
46205 + if (unlikely(!(gr_status & GR_READY)))
46206 + return 0;
46207 +
46208 + newacl = chk_subj_label(dentry, mnt, task->role);
46209 +
46210 + task_lock(task);
46211 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46212 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46213 + !(task->role->roletype & GR_ROLE_GOD) &&
46214 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46215 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46216 + task_unlock(task);
46217 + if (unsafe_share)
46218 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46219 + else
46220 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46221 + return -EACCES;
46222 + }
46223 + task_unlock(task);
46224 +
46225 + obj = chk_obj_label(dentry, mnt, task->acl);
46226 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46227 +
46228 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46229 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46230 + if (obj->nested)
46231 + task->acl = obj->nested;
46232 + else
46233 + task->acl = newacl;
46234 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46235 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46236 +
46237 + task->is_writable = 0;
46238 +
46239 + /* ignore additional mmap checks for processes that are writable
46240 + by the default ACL */
46241 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46242 + if (unlikely(obj->mode & GR_WRITE))
46243 + task->is_writable = 1;
46244 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46245 + if (unlikely(obj->mode & GR_WRITE))
46246 + task->is_writable = 1;
46247 +
46248 + gr_set_proc_res(task);
46249 +
46250 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46251 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46252 +#endif
46253 + return 0;
46254 +}
46255 +
46256 +/* always called with valid inodev ptr */
46257 +static void
46258 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46259 +{
46260 + struct acl_object_label *matchpo;
46261 + struct acl_subject_label *matchps;
46262 + struct acl_subject_label *subj;
46263 + struct acl_role_label *role;
46264 + unsigned int x;
46265 +
46266 + FOR_EACH_ROLE_START(role)
46267 + FOR_EACH_SUBJECT_START(role, subj, x)
46268 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46269 + matchpo->mode |= GR_DELETED;
46270 + FOR_EACH_SUBJECT_END(subj,x)
46271 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46272 + if (subj->inode == ino && subj->device == dev)
46273 + subj->mode |= GR_DELETED;
46274 + FOR_EACH_NESTED_SUBJECT_END(subj)
46275 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46276 + matchps->mode |= GR_DELETED;
46277 + FOR_EACH_ROLE_END(role)
46278 +
46279 + inodev->nentry->deleted = 1;
46280 +
46281 + return;
46282 +}
46283 +
46284 +void
46285 +gr_handle_delete(const ino_t ino, const dev_t dev)
46286 +{
46287 + struct inodev_entry *inodev;
46288 +
46289 + if (unlikely(!(gr_status & GR_READY)))
46290 + return;
46291 +
46292 + write_lock(&gr_inode_lock);
46293 + inodev = lookup_inodev_entry(ino, dev);
46294 + if (inodev != NULL)
46295 + do_handle_delete(inodev, ino, dev);
46296 + write_unlock(&gr_inode_lock);
46297 +
46298 + return;
46299 +}
46300 +
46301 +static void
46302 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46303 + const ino_t newinode, const dev_t newdevice,
46304 + struct acl_subject_label *subj)
46305 +{
46306 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46307 + struct acl_object_label *match;
46308 +
46309 + match = subj->obj_hash[index];
46310 +
46311 + while (match && (match->inode != oldinode ||
46312 + match->device != olddevice ||
46313 + !(match->mode & GR_DELETED)))
46314 + match = match->next;
46315 +
46316 + if (match && (match->inode == oldinode)
46317 + && (match->device == olddevice)
46318 + && (match->mode & GR_DELETED)) {
46319 + if (match->prev == NULL) {
46320 + subj->obj_hash[index] = match->next;
46321 + if (match->next != NULL)
46322 + match->next->prev = NULL;
46323 + } else {
46324 + match->prev->next = match->next;
46325 + if (match->next != NULL)
46326 + match->next->prev = match->prev;
46327 + }
46328 + match->prev = NULL;
46329 + match->next = NULL;
46330 + match->inode = newinode;
46331 + match->device = newdevice;
46332 + match->mode &= ~GR_DELETED;
46333 +
46334 + insert_acl_obj_label(match, subj);
46335 + }
46336 +
46337 + return;
46338 +}
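
[Editor's note] update_acl_obj_label() and its two siblings below handle file re-creation: when a path the policy knows about is deleted and later recreated (or renamed into place), the entry that was flagged GR_DELETED is unlinked from its old inode/device bucket, pointed at the new inode/device pair, un-flagged, and rehashed, so the same ACL object silently follows the file. A minimal doubly-linked-bucket sketch of that unlink-and-reinsert step, over a single hypothetical bucket rather than a hash table:

#include <stdio.h>

struct label {
	unsigned long inode;
	int deleted;
	struct label *prev, *next;
};

static struct label *bucket;            /* one chain standing in for obj_hash[index] */

static void insert_label(struct label *l)
{
	l->prev = NULL;
	if (bucket)
		bucket->prev = l;
	l->next = bucket;
	bucket = l;
}

/* move a deleted label from its old inode to a freshly created one */
static void revive_label(unsigned long old_ino, unsigned long new_ino)
{
	struct label *m = bucket;

	while (m && (m->inode != old_ino || !m->deleted))
		m = m->next;
	if (!m)
		return;

	/* unlink from the chain: the same prev/next surgery as in the patch */
	if (m->prev)
		m->prev->next = m->next;
	else
		bucket = m->next;
	if (m->next)
		m->next->prev = m->prev;

	m->inode = new_ino;
	m->deleted = 0;
	insert_label(m);                /* rehash under the new identity */
}

int main(void)
{
	struct label cfg = { .inode = 100, .deleted = 1 };

	insert_label(&cfg);
	revive_label(100, 200);         /* a watched file recreated with a new inode */
	printf("inode=%lu deleted=%d\n", bucket->inode, bucket->deleted);
	return 0;
}
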
46339 +
46340 +static void
46341 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46342 + const ino_t newinode, const dev_t newdevice,
46343 + struct acl_role_label *role)
46344 +{
46345 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46346 + struct acl_subject_label *match;
46347 +
46348 + match = role->subj_hash[index];
46349 +
46350 + while (match && (match->inode != oldinode ||
46351 + match->device != olddevice ||
46352 + !(match->mode & GR_DELETED)))
46353 + match = match->next;
46354 +
46355 + if (match && (match->inode == oldinode)
46356 + && (match->device == olddevice)
46357 + && (match->mode & GR_DELETED)) {
46358 + if (match->prev == NULL) {
46359 + role->subj_hash[index] = match->next;
46360 + if (match->next != NULL)
46361 + match->next->prev = NULL;
46362 + } else {
46363 + match->prev->next = match->next;
46364 + if (match->next != NULL)
46365 + match->next->prev = match->prev;
46366 + }
46367 + match->prev = NULL;
46368 + match->next = NULL;
46369 + match->inode = newinode;
46370 + match->device = newdevice;
46371 + match->mode &= ~GR_DELETED;
46372 +
46373 + insert_acl_subj_label(match, role);
46374 + }
46375 +
46376 + return;
46377 +}
46378 +
46379 +static void
46380 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46381 + const ino_t newinode, const dev_t newdevice)
46382 +{
46383 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46384 + struct inodev_entry *match;
46385 +
46386 + match = inodev_set.i_hash[index];
46387 +
46388 + while (match && (match->nentry->inode != oldinode ||
46389 + match->nentry->device != olddevice || !match->nentry->deleted))
46390 + match = match->next;
46391 +
46392 + if (match && (match->nentry->inode == oldinode)
46393 + && (match->nentry->device == olddevice) &&
46394 + match->nentry->deleted) {
46395 + if (match->prev == NULL) {
46396 + inodev_set.i_hash[index] = match->next;
46397 + if (match->next != NULL)
46398 + match->next->prev = NULL;
46399 + } else {
46400 + match->prev->next = match->next;
46401 + if (match->next != NULL)
46402 + match->next->prev = match->prev;
46403 + }
46404 + match->prev = NULL;
46405 + match->next = NULL;
46406 + match->nentry->inode = newinode;
46407 + match->nentry->device = newdevice;
46408 + match->nentry->deleted = 0;
46409 +
46410 + insert_inodev_entry(match);
46411 + }
46412 +
46413 + return;
46414 +}
46415 +
46416 +static void
46417 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46418 + const struct vfsmount *mnt)
46419 +{
46420 + struct acl_subject_label *subj;
46421 + struct acl_role_label *role;
46422 + unsigned int x;
46423 + ino_t inode = dentry->d_inode->i_ino;
46424 + dev_t dev = __get_dev(dentry);
46425 +
46426 + FOR_EACH_ROLE_START(role)
46427 + update_acl_subj_label(matchn->inode, matchn->device,
46428 + inode, dev, role);
46429 +
46430 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46431 + if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
46432 + subj->inode = inode;
46433 + subj->device = dev;
46434 + }
46435 + FOR_EACH_NESTED_SUBJECT_END(subj)
46436 + FOR_EACH_SUBJECT_START(role, subj, x)
46437 + update_acl_obj_label(matchn->inode, matchn->device,
46438 + inode, dev, subj);
46439 + FOR_EACH_SUBJECT_END(subj,x)
46440 + FOR_EACH_ROLE_END(role)
46441 +
46442 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46443 +
46444 + return;
46445 +}
46446 +
46447 +void
46448 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46449 +{
46450 + struct name_entry *matchn;
46451 +
46452 + if (unlikely(!(gr_status & GR_READY)))
46453 + return;
46454 +
46455 + preempt_disable();
46456 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46457 +
46458 + if (unlikely((unsigned long)matchn)) {
46459 + write_lock(&gr_inode_lock);
46460 + do_handle_create(matchn, dentry, mnt);
46461 + write_unlock(&gr_inode_lock);
46462 + }
46463 + preempt_enable();
46464 +
46465 + return;
46466 +}
46467 +
46468 +void
46469 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46470 + struct dentry *old_dentry,
46471 + struct dentry *new_dentry,
46472 + struct vfsmount *mnt, const __u8 replace)
46473 +{
46474 + struct name_entry *matchn;
46475 + struct inodev_entry *inodev;
46476 + ino_t oldinode = old_dentry->d_inode->i_ino;
46477 + dev_t olddev = __get_dev(old_dentry);
46478 +
46479 + /* vfs_rename swaps the name and parent link for old_dentry and
46480 + new_dentry.
46481 + at this point, old_dentry has the new name, parent link, and inode
46482 + for the renamed file.
46483 + if a file is being replaced by a rename, new_dentry has the inode
46484 + and name for the replaced file
46485 + */
46486 +
46487 + if (unlikely(!(gr_status & GR_READY)))
46488 + return;
46489 +
46490 + preempt_disable();
46491 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46492 +
46493 + /* we wouldn't have to check d_inode if it weren't for
46494 + NFS silly-renaming
46495 + */
46496 +
46497 + write_lock(&gr_inode_lock);
46498 + if (unlikely(replace && new_dentry->d_inode)) {
46499 + ino_t newinode = new_dentry->d_inode->i_ino;
46500 + dev_t newdev = __get_dev(new_dentry);
46501 + inodev = lookup_inodev_entry(newinode, newdev);
46502 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46503 + do_handle_delete(inodev, newinode, newdev);
46504 + }
46505 +
46506 + inodev = lookup_inodev_entry(oldinode, olddev);
46507 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46508 + do_handle_delete(inodev, oldinode, olddev);
46509 +
46510 + if (unlikely((unsigned long)matchn))
46511 + do_handle_create(matchn, old_dentry, mnt);
46512 +
46513 + write_unlock(&gr_inode_lock);
46514 + preempt_enable();
46515 +
46516 + return;
46517 +}
46518 +
46519 +static int
46520 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46521 + unsigned char **sum)
46522 +{
46523 + struct acl_role_label *r;
46524 + struct role_allowed_ip *ipp;
46525 + struct role_transition *trans;
46526 + unsigned int i;
46527 + int found = 0;
46528 + u32 curr_ip = current->signal->curr_ip;
46529 +
46530 + current->signal->saved_ip = curr_ip;
46531 +
46532 + /* check transition table */
46533 +
46534 + for (trans = current->role->transitions; trans; trans = trans->next) {
46535 + if (!strcmp(rolename, trans->rolename)) {
46536 + found = 1;
46537 + break;
46538 + }
46539 + }
46540 +
46541 + if (!found)
46542 + return 0;
46543 +
46544 + /* handle special roles that do not require authentication
46545 + and check ip */
46546 +
46547 + FOR_EACH_ROLE_START(r)
46548 + if (!strcmp(rolename, r->rolename) &&
46549 + (r->roletype & GR_ROLE_SPECIAL)) {
46550 + found = 0;
46551 + if (r->allowed_ips != NULL) {
46552 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46553 + if ((ntohl(curr_ip) & ipp->netmask) ==
46554 + (ntohl(ipp->addr) & ipp->netmask))
46555 + found = 1;
46556 + }
46557 + } else
46558 + found = 2;
46559 + if (!found)
46560 + return 0;
46561 +
46562 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46563 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46564 + *salt = NULL;
46565 + *sum = NULL;
46566 + return 1;
46567 + }
46568 + }
46569 + FOR_EACH_ROLE_END(r)
46570 +
46571 + for (i = 0; i < num_sprole_pws; i++) {
46572 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46573 + *salt = acl_special_roles[i]->salt;
46574 + *sum = acl_special_roles[i]->sum;
46575 + return 1;
46576 + }
46577 + }
46578 +
46579 + return 0;
46580 +}
46581 +
46582 +static void
46583 +assign_special_role(char *rolename)
46584 +{
46585 + struct acl_object_label *obj;
46586 + struct acl_role_label *r;
46587 + struct acl_role_label *assigned = NULL;
46588 + struct task_struct *tsk;
46589 + struct file *filp;
46590 +
46591 + FOR_EACH_ROLE_START(r)
46592 + if (!strcmp(rolename, r->rolename) &&
46593 + (r->roletype & GR_ROLE_SPECIAL)) {
46594 + assigned = r;
46595 + break;
46596 + }
46597 + FOR_EACH_ROLE_END(r)
46598 +
46599 + if (!assigned)
46600 + return;
46601 +
46602 + read_lock(&tasklist_lock);
46603 + read_lock(&grsec_exec_file_lock);
46604 +
46605 + tsk = current->real_parent;
46606 + if (tsk == NULL)
46607 + goto out_unlock;
46608 +
46609 + filp = tsk->exec_file;
46610 + if (filp == NULL)
46611 + goto out_unlock;
46612 +
46613 + tsk->is_writable = 0;
46614 +
46615 + tsk->acl_sp_role = 1;
46616 + tsk->acl_role_id = ++acl_sp_role_value;
46617 + tsk->role = assigned;
46618 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46619 +
46620 + /* ignore additional mmap checks for processes that are writable
46621 + by the default ACL */
46622 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46623 + if (unlikely(obj->mode & GR_WRITE))
46624 + tsk->is_writable = 1;
46625 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46626 + if (unlikely(obj->mode & GR_WRITE))
46627 + tsk->is_writable = 1;
46628 +
46629 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46630 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46631 +#endif
46632 +
46633 +out_unlock:
46634 + read_unlock(&grsec_exec_file_lock);
46635 + read_unlock(&tasklist_lock);
46636 + return;
46637 +}
46638 +
46639 +int gr_check_secure_terminal(struct task_struct *task)
46640 +{
46641 + struct task_struct *p, *p2, *p3;
46642 + struct files_struct *files;
46643 + struct fdtable *fdt;
46644 + struct file *our_file = NULL, *file;
46645 + int i;
46646 +
46647 + if (task->signal->tty == NULL)
46648 + return 1;
46649 +
46650 + files = get_files_struct(task);
46651 + if (files != NULL) {
46652 + rcu_read_lock();
46653 + fdt = files_fdtable(files);
46654 + for (i=0; i < fdt->max_fds; i++) {
46655 + file = fcheck_files(files, i);
46656 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46657 + get_file(file);
46658 + our_file = file;
46659 + }
46660 + }
46661 + rcu_read_unlock();
46662 + put_files_struct(files);
46663 + }
46664 +
46665 + if (our_file == NULL)
46666 + return 1;
46667 +
46668 + read_lock(&tasklist_lock);
46669 + do_each_thread(p2, p) {
46670 + files = get_files_struct(p);
46671 + if (files == NULL ||
46672 + (p->signal && p->signal->tty == task->signal->tty)) {
46673 + if (files != NULL)
46674 + put_files_struct(files);
46675 + continue;
46676 + }
46677 + rcu_read_lock();
46678 + fdt = files_fdtable(files);
46679 + for (i=0; i < fdt->max_fds; i++) {
46680 + file = fcheck_files(files, i);
46681 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46682 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46683 + p3 = task;
46684 + while (p3->pid > 0) {
46685 + if (p3 == p)
46686 + break;
46687 + p3 = p3->real_parent;
46688 + }
46689 + if (p3 == p)
46690 + break;
46691 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46692 + gr_handle_alertkill(p);
46693 + rcu_read_unlock();
46694 + put_files_struct(files);
46695 + read_unlock(&tasklist_lock);
46696 + fput(our_file);
46697 + return 0;
46698 + }
46699 + }
46700 + rcu_read_unlock();
46701 + put_files_struct(files);
46702 + } while_each_thread(p2, p);
46703 + read_unlock(&tasklist_lock);
46704 +
46705 + fput(our_file);
46706 + return 1;
46707 +}
46708 +
46709 +ssize_t
46710 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46711 +{
46712 + struct gr_arg_wrapper uwrap;
46713 + unsigned char *sprole_salt = NULL;
46714 + unsigned char *sprole_sum = NULL;
46715 + int error = sizeof (struct gr_arg_wrapper);
46716 + int error2 = 0;
46717 +
46718 + mutex_lock(&gr_dev_mutex);
46719 +
46720 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46721 + error = -EPERM;
46722 + goto out;
46723 + }
46724 +
46725 + if (count != sizeof (struct gr_arg_wrapper)) {
46726 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46727 + error = -EINVAL;
46728 + goto out;
46729 + }
46730 +
46731 +
46732 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46733 + gr_auth_expires = 0;
46734 + gr_auth_attempts = 0;
46735 + }
46736 +
46737 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46738 + error = -EFAULT;
46739 + goto out;
46740 + }
46741 +
46742 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46743 + error = -EINVAL;
46744 + goto out;
46745 + }
46746 +
46747 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46748 + error = -EFAULT;
46749 + goto out;
46750 + }
46751 +
46752 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46753 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46754 + time_after(gr_auth_expires, get_seconds())) {
46755 + error = -EBUSY;
46756 + goto out;
46757 + }
46758 +
46759 + /* if a non-root user is trying to do anything other than use a special role,
46760 + do not attempt authentication and do not count it towards authentication
46761 + lockout
46762 + */
46763 +
46764 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46765 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46766 + current_uid()) {
46767 + error = -EPERM;
46768 + goto out;
46769 + }
46770 +
46771 + /* ensure pw and special role name are null terminated */
46772 +
46773 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46774 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46775 +
46776 + /* Okay.
46777 + * We now have enough of the argument structure (we have yet
46778 + * to copy_from_user the tables themselves). Copy the tables
46779 + * only if we need them, i.e. for loading operations. */
46780 +
46781 + switch (gr_usermode->mode) {
46782 + case GR_STATUS:
46783 + if (gr_status & GR_READY) {
46784 + error = 1;
46785 + if (!gr_check_secure_terminal(current))
46786 + error = 3;
46787 + } else
46788 + error = 2;
46789 + goto out;
46790 + case GR_SHUTDOWN:
46791 + if ((gr_status & GR_READY)
46792 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46793 + pax_open_kernel();
46794 + gr_status &= ~GR_READY;
46795 + pax_close_kernel();
46796 +
46797 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46798 + free_variables();
46799 + memset(gr_usermode, 0, sizeof (struct gr_arg));
46800 + memset(gr_system_salt, 0, GR_SALT_LEN);
46801 + memset(gr_system_sum, 0, GR_SHA_LEN);
46802 + } else if (gr_status & GR_READY) {
46803 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46804 + error = -EPERM;
46805 + } else {
46806 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46807 + error = -EAGAIN;
46808 + }
46809 + break;
46810 + case GR_ENABLE:
46811 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46812 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46813 + else {
46814 + if (gr_status & GR_READY)
46815 + error = -EAGAIN;
46816 + else
46817 + error = error2;
46818 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46819 + }
46820 + break;
46821 + case GR_RELOAD:
46822 + if (!(gr_status & GR_READY)) {
46823 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46824 + error = -EAGAIN;
46825 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46826 + lock_kernel();
46827 +
46828 + pax_open_kernel();
46829 + gr_status &= ~GR_READY;
46830 + pax_close_kernel();
46831 +
46832 + free_variables();
46833 + if (!(error2 = gracl_init(gr_usermode))) {
46834 + unlock_kernel();
46835 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46836 + } else {
46837 + unlock_kernel();
46838 + error = error2;
46839 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46840 + }
46841 + } else {
46842 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46843 + error = -EPERM;
46844 + }
46845 + break;
46846 + case GR_SEGVMOD:
46847 + if (unlikely(!(gr_status & GR_READY))) {
46848 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46849 + error = -EAGAIN;
46850 + break;
46851 + }
46852 +
46853 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46854 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46855 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46856 + struct acl_subject_label *segvacl;
46857 + segvacl =
46858 + lookup_acl_subj_label(gr_usermode->segv_inode,
46859 + gr_usermode->segv_device,
46860 + current->role);
46861 + if (segvacl) {
46862 + segvacl->crashes = 0;
46863 + segvacl->expires = 0;
46864 + }
46865 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46866 + gr_remove_uid(gr_usermode->segv_uid);
46867 + }
46868 + } else {
46869 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46870 + error = -EPERM;
46871 + }
46872 + break;
46873 + case GR_SPROLE:
46874 + case GR_SPROLEPAM:
46875 + if (unlikely(!(gr_status & GR_READY))) {
46876 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46877 + error = -EAGAIN;
46878 + break;
46879 + }
46880 +
46881 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46882 + current->role->expires = 0;
46883 + current->role->auth_attempts = 0;
46884 + }
46885 +
46886 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46887 + time_after(current->role->expires, get_seconds())) {
46888 + error = -EBUSY;
46889 + goto out;
46890 + }
46891 +
46892 + if (lookup_special_role_auth
46893 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46894 + && ((!sprole_salt && !sprole_sum)
46895 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
46896 + char *p = "";
46897 + assign_special_role(gr_usermode->sp_role);
46898 + read_lock(&tasklist_lock);
46899 + if (current->real_parent)
46900 + p = current->real_parent->role->rolename;
46901 + read_unlock(&tasklist_lock);
46902 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
46903 + p, acl_sp_role_value);
46904 + } else {
46905 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
46906 + error = -EPERM;
46907 + if (!(current->role->auth_attempts++))
46908 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46909 +
46910 + goto out;
46911 + }
46912 + break;
46913 + case GR_UNSPROLE:
46914 + if (unlikely(!(gr_status & GR_READY))) {
46915 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
46916 + error = -EAGAIN;
46917 + break;
46918 + }
46919 +
46920 + if (current->role->roletype & GR_ROLE_SPECIAL) {
46921 + char *p = "";
46922 + int i = 0;
46923 +
46924 + read_lock(&tasklist_lock);
46925 + if (current->real_parent) {
46926 + p = current->real_parent->role->rolename;
46927 + i = current->real_parent->acl_role_id;
46928 + }
46929 + read_unlock(&tasklist_lock);
46930 +
46931 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
46932 + gr_set_acls(1);
46933 + } else {
46934 + error = -EPERM;
46935 + goto out;
46936 + }
46937 + break;
46938 + default:
46939 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
46940 + error = -EINVAL;
46941 + break;
46942 + }
46943 +
46944 + if (error != -EPERM)
46945 + goto out;
46946 +
46947 + if (!(gr_auth_attempts++))
46948 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46949 +
46950 + out:
46951 + mutex_unlock(&gr_dev_mutex);
46952 + return error;
46953 +}
46954 +
46955 +/* must be called with
46956 + rcu_read_lock();
46957 + read_lock(&tasklist_lock);
46958 + read_lock(&grsec_exec_file_lock);
46959 +*/
46960 +int gr_apply_subject_to_task(struct task_struct *task)
46961 +{
46962 + struct acl_object_label *obj;
46963 + char *tmpname;
46964 + struct acl_subject_label *tmpsubj;
46965 + struct file *filp;
46966 + struct name_entry *nmatch;
46967 +
46968 + filp = task->exec_file;
46969 + if (filp == NULL)
46970 + return 0;
46971 +
46972 + /* the following applies the correct subject to
46973 + binaries already running when the RBAC system
46974 + is enabled, in case those binaries have been
46975 + replaced or deleted since they were executed
46976 + -----
46977 + when the RBAC system starts, the inode/dev
46978 + from exec_file will be one that the RBAC system
46979 + is unaware of. It only knows the inode/dev
46980 + of the file currently on disk, or the absence
46981 + of it.
46982 + */
46983 + preempt_disable();
46984 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
46985 +
46986 + nmatch = lookup_name_entry(tmpname);
46987 + preempt_enable();
46988 + tmpsubj = NULL;
46989 + if (nmatch) {
46990 + if (nmatch->deleted)
46991 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
46992 + else
46993 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
46994 + if (tmpsubj != NULL)
46995 + task->acl = tmpsubj;
46996 + }
46997 + if (tmpsubj == NULL)
46998 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
46999 + task->role);
47000 + if (task->acl) {
47001 + struct acl_subject_label *curr;
47002 + curr = task->acl;
47003 +
47004 + task->is_writable = 0;
47005 + /* ignore additional mmap checks for processes that are writable
47006 + by the default ACL */
47007 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47008 + if (unlikely(obj->mode & GR_WRITE))
47009 + task->is_writable = 1;
47010 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47011 + if (unlikely(obj->mode & GR_WRITE))
47012 + task->is_writable = 1;
47013 +
47014 + gr_set_proc_res(task);
47015 +
47016 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47017 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47018 +#endif
47019 + } else {
47020 + return 1;
47021 + }
47022 +
47023 + return 0;
47024 +}
47025 +
47026 +int
47027 +gr_set_acls(const int type)
47028 +{
47029 + struct task_struct *task, *task2;
47030 + struct acl_role_label *role = current->role;
47031 + __u16 acl_role_id = current->acl_role_id;
47032 + const struct cred *cred;
47033 + int ret;
47034 +
47035 + rcu_read_lock();
47036 + read_lock(&tasklist_lock);
47037 + read_lock(&grsec_exec_file_lock);
47038 + do_each_thread(task2, task) {
47039 + /* check to see if we're called from the exit handler,
47040 + if so, only replace ACLs that have inherited the admin
47041 + ACL */
47042 +
47043 + if (type && (task->role != role ||
47044 + task->acl_role_id != acl_role_id))
47045 + continue;
47046 +
47047 + task->acl_role_id = 0;
47048 + task->acl_sp_role = 0;
47049 +
47050 + if (task->exec_file) {
47051 + cred = __task_cred(task);
47052 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47053 +
47054 + ret = gr_apply_subject_to_task(task);
47055 + if (ret) {
47056 + read_unlock(&grsec_exec_file_lock);
47057 + read_unlock(&tasklist_lock);
47058 + rcu_read_unlock();
47059 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47060 + return ret;
47061 + }
47062 + } else {
47063 + // it's a kernel process
47064 + task->role = kernel_role;
47065 + task->acl = kernel_role->root_label;
47066 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47067 + task->acl->mode &= ~GR_PROCFIND;
47068 +#endif
47069 + }
47070 + } while_each_thread(task2, task);
47071 + read_unlock(&grsec_exec_file_lock);
47072 + read_unlock(&tasklist_lock);
47073 + rcu_read_unlock();
47074 +
47075 + return 0;
47076 +}
47077 +
47078 +void
47079 +gr_learn_resource(const struct task_struct *task,
47080 + const int res, const unsigned long wanted, const int gt)
47081 +{
47082 + struct acl_subject_label *acl;
47083 + const struct cred *cred;
47084 +
47085 + if (unlikely((gr_status & GR_READY) &&
47086 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47087 + goto skip_reslog;
47088 +
47089 +#ifdef CONFIG_GRKERNSEC_RESLOG
47090 + gr_log_resource(task, res, wanted, gt);
47091 +#endif
47092 + skip_reslog:
47093 +
47094 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47095 + return;
47096 +
47097 + acl = task->acl;
47098 +
47099 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47100 + !(acl->resmask & (1 << (unsigned short) res))))
47101 + return;
47102 +
47103 + if (wanted >= acl->res[res].rlim_cur) {
47104 + unsigned long res_add;
47105 +
47106 + res_add = wanted;
47107 + switch (res) {
47108 + case RLIMIT_CPU:
47109 + res_add += GR_RLIM_CPU_BUMP;
47110 + break;
47111 + case RLIMIT_FSIZE:
47112 + res_add += GR_RLIM_FSIZE_BUMP;
47113 + break;
47114 + case RLIMIT_DATA:
47115 + res_add += GR_RLIM_DATA_BUMP;
47116 + break;
47117 + case RLIMIT_STACK:
47118 + res_add += GR_RLIM_STACK_BUMP;
47119 + break;
47120 + case RLIMIT_CORE:
47121 + res_add += GR_RLIM_CORE_BUMP;
47122 + break;
47123 + case RLIMIT_RSS:
47124 + res_add += GR_RLIM_RSS_BUMP;
47125 + break;
47126 + case RLIMIT_NPROC:
47127 + res_add += GR_RLIM_NPROC_BUMP;
47128 + break;
47129 + case RLIMIT_NOFILE:
47130 + res_add += GR_RLIM_NOFILE_BUMP;
47131 + break;
47132 + case RLIMIT_MEMLOCK:
47133 + res_add += GR_RLIM_MEMLOCK_BUMP;
47134 + break;
47135 + case RLIMIT_AS:
47136 + res_add += GR_RLIM_AS_BUMP;
47137 + break;
47138 + case RLIMIT_LOCKS:
47139 + res_add += GR_RLIM_LOCKS_BUMP;
47140 + break;
47141 + case RLIMIT_SIGPENDING:
47142 + res_add += GR_RLIM_SIGPENDING_BUMP;
47143 + break;
47144 + case RLIMIT_MSGQUEUE:
47145 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47146 + break;
47147 + case RLIMIT_NICE:
47148 + res_add += GR_RLIM_NICE_BUMP;
47149 + break;
47150 + case RLIMIT_RTPRIO:
47151 + res_add += GR_RLIM_RTPRIO_BUMP;
47152 + break;
47153 + case RLIMIT_RTTIME:
47154 + res_add += GR_RLIM_RTTIME_BUMP;
47155 + break;
47156 + }
47157 +
47158 + acl->res[res].rlim_cur = res_add;
47159 +
47160 + if (wanted > acl->res[res].rlim_max)
47161 + acl->res[res].rlim_max = res_add;
47162 +
47163 + /* only log the subject filename, since resource logging is supported for
47164 + single-subject learning only */
47165 + rcu_read_lock();
47166 + cred = __task_cred(task);
47167 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47168 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47169 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47170 + "", (unsigned long) res, &task->signal->saved_ip);
47171 + rcu_read_unlock();
47172 + }
47173 +
47174 + return;
47175 +}
47176 +
47177 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47178 +void
47179 +pax_set_initial_flags(struct linux_binprm *bprm)
47180 +{
47181 + struct task_struct *task = current;
47182 + struct acl_subject_label *proc;
47183 + unsigned long flags;
47184 +
47185 + if (unlikely(!(gr_status & GR_READY)))
47186 + return;
47187 +
47188 + flags = pax_get_flags(task);
47189 +
47190 + proc = task->acl;
47191 +
47192 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47193 + flags &= ~MF_PAX_PAGEEXEC;
47194 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47195 + flags &= ~MF_PAX_SEGMEXEC;
47196 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47197 + flags &= ~MF_PAX_RANDMMAP;
47198 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47199 + flags &= ~MF_PAX_EMUTRAMP;
47200 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47201 + flags &= ~MF_PAX_MPROTECT;
47202 +
47203 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47204 + flags |= MF_PAX_PAGEEXEC;
47205 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47206 + flags |= MF_PAX_SEGMEXEC;
47207 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47208 + flags |= MF_PAX_RANDMMAP;
47209 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47210 + flags |= MF_PAX_EMUTRAMP;
47211 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47212 + flags |= MF_PAX_MPROTECT;
47213 +
47214 + pax_set_flags(task, flags);
47215 +
47216 + return;
47217 +}
47218 +#endif
47219 +
47220 +#ifdef CONFIG_SYSCTL
47221 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47222 + system to save 35kb of memory */
47223 +
47224 +/* we modify the passed in filename, but adjust it back before returning */
47225 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47226 +{
47227 + struct name_entry *nmatch;
47228 + char *p, *lastp = NULL;
47229 + struct acl_object_label *obj = NULL, *tmp;
47230 + struct acl_subject_label *tmpsubj;
47231 + char c = '\0';
47232 +
47233 + read_lock(&gr_inode_lock);
47234 +
47235 + p = name + len - 1;
47236 + do {
47237 + nmatch = lookup_name_entry(name);
47238 + if (lastp != NULL)
47239 + *lastp = c;
47240 +
47241 + if (nmatch == NULL)
47242 + goto next_component;
47243 + tmpsubj = current->acl;
47244 + do {
47245 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47246 + if (obj != NULL) {
47247 + tmp = obj->globbed;
47248 + while (tmp) {
47249 + if (!glob_match(tmp->filename, name)) {
47250 + obj = tmp;
47251 + goto found_obj;
47252 + }
47253 + tmp = tmp->next;
47254 + }
47255 + goto found_obj;
47256 + }
47257 + } while ((tmpsubj = tmpsubj->parent_subject));
47258 +next_component:
47259 + /* end case */
47260 + if (p == name)
47261 + break;
47262 +
47263 + while (*p != '/')
47264 + p--;
47265 + if (p == name)
47266 + lastp = p + 1;
47267 + else {
47268 + lastp = p;
47269 + p--;
47270 + }
47271 + c = *lastp;
47272 + *lastp = '\0';
47273 + } while (1);
47274 +found_obj:
47275 + read_unlock(&gr_inode_lock);
47276 + /* obj returned will always be non-null */
47277 + return obj;
47278 +}
47279 +
47280 +/* returns 0 when allowing, non-zero on error
47281 + op of 0 is used for readdir, so we don't log the names of hidden files
47282 +*/
47283 +__u32
47284 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47285 +{
47286 + ctl_table *tmp;
47287 + const char *proc_sys = "/proc/sys";
47288 + char *path;
47289 + struct acl_object_label *obj;
47290 + unsigned short len = 0, pos = 0, depth = 0, i;
47291 + __u32 err = 0;
47292 + __u32 mode = 0;
47293 +
47294 + if (unlikely(!(gr_status & GR_READY)))
47295 + return 0;
47296 +
47297 + /* for now, ignore operations on non-sysctl entries if it's not a
47298 + readdir */
47299 + if (table->child != NULL && op != 0)
47300 + return 0;
47301 +
47302 + mode |= GR_FIND;
47303 + /* it's only a read if it's an entry, read on dirs is for readdir */
47304 + if (op & MAY_READ)
47305 + mode |= GR_READ;
47306 + if (op & MAY_WRITE)
47307 + mode |= GR_WRITE;
47308 +
47309 + preempt_disable();
47310 +
47311 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47312 +
47313 + /* it's only a read/write if it's an actual entry, not a dir
47314 + (which are opened for readdir)
47315 + */
47316 +
47317 + /* convert the requested sysctl entry into a pathname */
47318 +
47319 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47320 + len += strlen(tmp->procname);
47321 + len++;
47322 + depth++;
47323 + }
47324 +
47325 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47326 + /* deny */
47327 + goto out;
47328 + }
47329 +
47330 + memset(path, 0, PAGE_SIZE);
47331 +
47332 + memcpy(path, proc_sys, strlen(proc_sys));
47333 +
47334 + pos += strlen(proc_sys);
47335 +
47336 + for (; depth > 0; depth--) {
47337 + path[pos] = '/';
47338 + pos++;
47339 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47340 + if (depth == i) {
47341 + memcpy(path + pos, tmp->procname,
47342 + strlen(tmp->procname));
47343 + pos += strlen(tmp->procname);
47344 + }
47345 + i++;
47346 + }
47347 + }
47348 +
47349 + obj = gr_lookup_by_name(path, pos);
47350 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47351 +
47352 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47353 + ((err & mode) != mode))) {
47354 + __u32 new_mode = mode;
47355 +
47356 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47357 +
47358 + err = 0;
47359 + gr_log_learn_sysctl(path, new_mode);
47360 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47361 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47362 + err = -ENOENT;
47363 + } else if (!(err & GR_FIND)) {
47364 + err = -ENOENT;
47365 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47366 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47367 + path, (mode & GR_READ) ? " reading" : "",
47368 + (mode & GR_WRITE) ? " writing" : "");
47369 + err = -EACCES;
47370 + } else if ((err & mode) != mode) {
47371 + err = -EACCES;
47372 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47373 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47374 + path, (mode & GR_READ) ? " reading" : "",
47375 + (mode & GR_WRITE) ? " writing" : "");
47376 + err = 0;
47377 + } else
47378 + err = 0;
47379 +
47380 + out:
47381 + preempt_enable();
47382 +
47383 + return err;
47384 +}
47385 +#endif
47386 +
47387 +int
47388 +gr_handle_proc_ptrace(struct task_struct *task)
47389 +{
47390 + struct file *filp;
47391 + struct task_struct *tmp = task;
47392 + struct task_struct *curtemp = current;
47393 + __u32 retmode;
47394 +
47395 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47396 + if (unlikely(!(gr_status & GR_READY)))
47397 + return 0;
47398 +#endif
47399 +
47400 + read_lock(&tasklist_lock);
47401 + read_lock(&grsec_exec_file_lock);
47402 + filp = task->exec_file;
47403 +
47404 + while (tmp->pid > 0) {
47405 + if (tmp == curtemp)
47406 + break;
47407 + tmp = tmp->real_parent;
47408 + }
47409 +
47410 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47411 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47412 + read_unlock(&grsec_exec_file_lock);
47413 + read_unlock(&tasklist_lock);
47414 + return 1;
47415 + }
47416 +
47417 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47418 + if (!(gr_status & GR_READY)) {
47419 + read_unlock(&grsec_exec_file_lock);
47420 + read_unlock(&tasklist_lock);
47421 + return 0;
47422 + }
47423 +#endif
47424 +
47425 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47426 + read_unlock(&grsec_exec_file_lock);
47427 + read_unlock(&tasklist_lock);
47428 +
47429 + if (retmode & GR_NOPTRACE)
47430 + return 1;
47431 +
47432 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47433 + && (current->acl != task->acl || (current->acl != current->role->root_label
47434 + && current->pid != task->pid)))
47435 + return 1;
47436 +
47437 + return 0;
47438 +}
47439 +
47440 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47441 +{
47442 + if (unlikely(!(gr_status & GR_READY)))
47443 + return;
47444 +
47445 + if (!(current->role->roletype & GR_ROLE_GOD))
47446 + return;
47447 +
47448 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47449 + p->role->rolename, gr_task_roletype_to_char(p),
47450 + p->acl->filename);
47451 +}
47452 +
47453 +int
47454 +gr_handle_ptrace(struct task_struct *task, const long request)
47455 +{
47456 + struct task_struct *tmp = task;
47457 + struct task_struct *curtemp = current;
47458 + __u32 retmode;
47459 +
47460 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47461 + if (unlikely(!(gr_status & GR_READY)))
47462 + return 0;
47463 +#endif
47464 +
47465 + read_lock(&tasklist_lock);
47466 + while (tmp->pid > 0) {
47467 + if (tmp == curtemp)
47468 + break;
47469 + tmp = tmp->real_parent;
47470 + }
47471 +
47472 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47473 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47474 + read_unlock(&tasklist_lock);
47475 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47476 + return 1;
47477 + }
47478 + read_unlock(&tasklist_lock);
47479 +
47480 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47481 + if (!(gr_status & GR_READY))
47482 + return 0;
47483 +#endif
47484 +
47485 + read_lock(&grsec_exec_file_lock);
47486 + if (unlikely(!task->exec_file)) {
47487 + read_unlock(&grsec_exec_file_lock);
47488 + return 0;
47489 + }
47490 +
47491 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47492 + read_unlock(&grsec_exec_file_lock);
47493 +
47494 + if (retmode & GR_NOPTRACE) {
47495 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47496 + return 1;
47497 + }
47498 +
47499 + if (retmode & GR_PTRACERD) {
47500 + switch (request) {
47501 + case PTRACE_POKETEXT:
47502 + case PTRACE_POKEDATA:
47503 + case PTRACE_POKEUSR:
47504 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47505 + case PTRACE_SETREGS:
47506 + case PTRACE_SETFPREGS:
47507 +#endif
47508 +#ifdef CONFIG_X86
47509 + case PTRACE_SETFPXREGS:
47510 +#endif
47511 +#ifdef CONFIG_ALTIVEC
47512 + case PTRACE_SETVRREGS:
47513 +#endif
47514 + return 1;
47515 + default:
47516 + return 0;
47517 + }
47518 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47519 + !(current->role->roletype & GR_ROLE_GOD) &&
47520 + (current->acl != task->acl)) {
47521 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47522 + return 1;
47523 + }
47524 +
47525 + return 0;
47526 +}
47527 +
47528 +static int is_writable_mmap(const struct file *filp)
47529 +{
47530 + struct task_struct *task = current;
47531 + struct acl_object_label *obj, *obj2;
47532 +
47533 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47534 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47535 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47536 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47537 + task->role->root_label);
47538 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47539 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47540 + return 1;
47541 + }
47542 + }
47543 + return 0;
47544 +}
47545 +
47546 +int
47547 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47548 +{
47549 + __u32 mode;
47550 +
47551 + if (unlikely(!file || !(prot & PROT_EXEC)))
47552 + return 1;
47553 +
47554 + if (is_writable_mmap(file))
47555 + return 0;
47556 +
47557 + mode =
47558 + gr_search_file(file->f_path.dentry,
47559 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47560 + file->f_path.mnt);
47561 +
47562 + if (!gr_tpe_allow(file))
47563 + return 0;
47564 +
47565 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47566 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47567 + return 0;
47568 + } else if (unlikely(!(mode & GR_EXEC))) {
47569 + return 0;
47570 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47571 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47572 + return 1;
47573 + }
47574 +
47575 + return 1;
47576 +}
47577 +
47578 +int
47579 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47580 +{
47581 + __u32 mode;
47582 +
47583 + if (unlikely(!file || !(prot & PROT_EXEC)))
47584 + return 1;
47585 +
47586 + if (is_writable_mmap(file))
47587 + return 0;
47588 +
47589 + mode =
47590 + gr_search_file(file->f_path.dentry,
47591 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47592 + file->f_path.mnt);
47593 +
47594 + if (!gr_tpe_allow(file))
47595 + return 0;
47596 +
47597 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47598 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47599 + return 0;
47600 + } else if (unlikely(!(mode & GR_EXEC))) {
47601 + return 0;
47602 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47603 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47604 + return 1;
47605 + }
47606 +
47607 + return 1;
47608 +}
47609 +
47610 +void
47611 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47612 +{
47613 + unsigned long runtime;
47614 + unsigned long cputime;
47615 + unsigned int wday, cday;
47616 + __u8 whr, chr;
47617 + __u8 wmin, cmin;
47618 + __u8 wsec, csec;
47619 + struct timespec timeval;
47620 +
47621 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47622 + !(task->acl->mode & GR_PROCACCT)))
47623 + return;
47624 +
47625 + do_posix_clock_monotonic_gettime(&timeval);
47626 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47627 + wday = runtime / (3600 * 24);
47628 + runtime -= wday * (3600 * 24);
47629 + whr = runtime / 3600;
47630 + runtime -= whr * 3600;
47631 + wmin = runtime / 60;
47632 + runtime -= wmin * 60;
47633 + wsec = runtime;
47634 +
47635 + cputime = (task->utime + task->stime) / HZ;
47636 + cday = cputime / (3600 * 24);
47637 + cputime -= cday * (3600 * 24);
47638 + chr = cputime / 3600;
47639 + cputime -= chr * 3600;
47640 + cmin = cputime / 60;
47641 + cputime -= cmin * 60;
47642 + csec = cputime;
47643 +
47644 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47645 +
47646 + return;
47647 +}
47648 +
47649 +void gr_set_kernel_label(struct task_struct *task)
47650 +{
47651 + if (gr_status & GR_READY) {
47652 + task->role = kernel_role;
47653 + task->acl = kernel_role->root_label;
47654 + }
47655 + return;
47656 +}
47657 +
47658 +#ifdef CONFIG_TASKSTATS
47659 +int gr_is_taskstats_denied(int pid)
47660 +{
47661 + struct task_struct *task;
47662 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47663 + const struct cred *cred;
47664 +#endif
47665 + int ret = 0;
47666 +
47667 + /* restrict taskstats viewing to un-chrooted root users
47668 + who have the 'view' subject flag if the RBAC system is enabled
47669 + */
47670 +
47671 + rcu_read_lock();
47672 + read_lock(&tasklist_lock);
47673 + task = find_task_by_vpid(pid);
47674 + if (task) {
47675 +#ifdef CONFIG_GRKERNSEC_CHROOT
47676 + if (proc_is_chrooted(task))
47677 + ret = -EACCES;
47678 +#endif
47679 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47680 + cred = __task_cred(task);
47681 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47682 + if (cred->uid != 0)
47683 + ret = -EACCES;
47684 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47685 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47686 + ret = -EACCES;
47687 +#endif
47688 +#endif
47689 + if (gr_status & GR_READY) {
47690 + if (!(task->acl->mode & GR_VIEW))
47691 + ret = -EACCES;
47692 + }
47693 + } else
47694 + ret = -ENOENT;
47695 +
47696 + read_unlock(&tasklist_lock);
47697 + rcu_read_unlock();
47698 +
47699 + return ret;
47700 +}
47701 +#endif
47702 +
47703 +/* AUXV entries are filled via a descendant of search_binary_handler
47704 + after we've already applied the subject for the target
47705 +*/
47706 +int gr_acl_enable_at_secure(void)
47707 +{
47708 + if (unlikely(!(gr_status & GR_READY)))
47709 + return 0;
47710 +
47711 + if (current->acl->mode & GR_ATSECURE)
47712 + return 1;
47713 +
47714 + return 0;
47715 +}
47716 +
47717 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47718 +{
47719 + struct task_struct *task = current;
47720 + struct dentry *dentry = file->f_path.dentry;
47721 + struct vfsmount *mnt = file->f_path.mnt;
47722 + struct acl_object_label *obj, *tmp;
47723 + struct acl_subject_label *subj;
47724 + unsigned int bufsize;
47725 + int is_not_root;
47726 + char *path;
47727 + dev_t dev = __get_dev(dentry);
47728 +
47729 + if (unlikely(!(gr_status & GR_READY)))
47730 + return 1;
47731 +
47732 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47733 + return 1;
47734 +
47735 + /* ignore Eric Biederman */
47736 + if (IS_PRIVATE(dentry->d_inode))
47737 + return 1;
47738 +
47739 + subj = task->acl;
47740 + do {
47741 + obj = lookup_acl_obj_label(ino, dev, subj);
47742 + if (obj != NULL)
47743 + return (obj->mode & GR_FIND) ? 1 : 0;
47744 + } while ((subj = subj->parent_subject));
47745 +
47746 + /* this is purely an optimization since we're looking for an object
47747 + for the directory we're doing a readdir on.
47748 + if it's possible for any globbed object to match the entry we're
47749 + filling into the directory, then the object we find here will be
47750 + an anchor point with attached globbed objects
47751 + */
47752 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47753 + if (obj->globbed == NULL)
47754 + return (obj->mode & GR_FIND) ? 1 : 0;
47755 +
47756 + is_not_root = ((obj->filename[0] == '/') &&
47757 + (obj->filename[1] == '\0')) ? 0 : 1;
47758 + bufsize = PAGE_SIZE - namelen - is_not_root;
47759 +
47760 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
47761 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47762 + return 1;
47763 +
47764 + preempt_disable();
47765 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47766 + bufsize);
47767 +
47768 + bufsize = strlen(path);
47769 +
47770 + /* if base is "/", don't append an additional slash */
47771 + if (is_not_root)
47772 + *(path + bufsize) = '/';
47773 + memcpy(path + bufsize + is_not_root, name, namelen);
47774 + *(path + bufsize + namelen + is_not_root) = '\0';
47775 +
47776 + tmp = obj->globbed;
47777 + while (tmp) {
47778 + if (!glob_match(tmp->filename, path)) {
47779 + preempt_enable();
47780 + return (tmp->mode & GR_FIND) ? 1 : 0;
47781 + }
47782 + tmp = tmp->next;
47783 + }
47784 + preempt_enable();
47785 + return (obj->mode & GR_FIND) ? 1 : 0;
47786 +}
47787 +
47788 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47789 +EXPORT_SYMBOL(gr_acl_is_enabled);
47790 +#endif
47791 +EXPORT_SYMBOL(gr_learn_resource);
47792 +EXPORT_SYMBOL(gr_set_kernel_label);
47793 +#ifdef CONFIG_SECURITY
47794 +EXPORT_SYMBOL(gr_check_user_change);
47795 +EXPORT_SYMBOL(gr_check_group_change);
47796 +#endif
47797 +
47798 diff -urNp linux-2.6.32.41/grsecurity/gracl_cap.c linux-2.6.32.41/grsecurity/gracl_cap.c
47799 --- linux-2.6.32.41/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47800 +++ linux-2.6.32.41/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47801 @@ -0,0 +1,138 @@
47802 +#include <linux/kernel.h>
47803 +#include <linux/module.h>
47804 +#include <linux/sched.h>
47805 +#include <linux/gracl.h>
47806 +#include <linux/grsecurity.h>
47807 +#include <linux/grinternal.h>
47808 +
47809 +static const char *captab_log[] = {
47810 + "CAP_CHOWN",
47811 + "CAP_DAC_OVERRIDE",
47812 + "CAP_DAC_READ_SEARCH",
47813 + "CAP_FOWNER",
47814 + "CAP_FSETID",
47815 + "CAP_KILL",
47816 + "CAP_SETGID",
47817 + "CAP_SETUID",
47818 + "CAP_SETPCAP",
47819 + "CAP_LINUX_IMMUTABLE",
47820 + "CAP_NET_BIND_SERVICE",
47821 + "CAP_NET_BROADCAST",
47822 + "CAP_NET_ADMIN",
47823 + "CAP_NET_RAW",
47824 + "CAP_IPC_LOCK",
47825 + "CAP_IPC_OWNER",
47826 + "CAP_SYS_MODULE",
47827 + "CAP_SYS_RAWIO",
47828 + "CAP_SYS_CHROOT",
47829 + "CAP_SYS_PTRACE",
47830 + "CAP_SYS_PACCT",
47831 + "CAP_SYS_ADMIN",
47832 + "CAP_SYS_BOOT",
47833 + "CAP_SYS_NICE",
47834 + "CAP_SYS_RESOURCE",
47835 + "CAP_SYS_TIME",
47836 + "CAP_SYS_TTY_CONFIG",
47837 + "CAP_MKNOD",
47838 + "CAP_LEASE",
47839 + "CAP_AUDIT_WRITE",
47840 + "CAP_AUDIT_CONTROL",
47841 + "CAP_SETFCAP",
47842 + "CAP_MAC_OVERRIDE",
47843 + "CAP_MAC_ADMIN"
47844 +};
47845 +
47846 +EXPORT_SYMBOL(gr_is_capable);
47847 +EXPORT_SYMBOL(gr_is_capable_nolog);
47848 +
47849 +int
47850 +gr_is_capable(const int cap)
47851 +{
47852 + struct task_struct *task = current;
47853 + const struct cred *cred = current_cred();
47854 + struct acl_subject_label *curracl;
47855 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47856 + kernel_cap_t cap_audit = __cap_empty_set;
47857 +
47858 + if (!gr_acl_is_enabled())
47859 + return 1;
47860 +
47861 + curracl = task->acl;
47862 +
47863 + cap_drop = curracl->cap_lower;
47864 + cap_mask = curracl->cap_mask;
47865 + cap_audit = curracl->cap_invert_audit;
47866 +
47867 + while ((curracl = curracl->parent_subject)) {
47868 + /* if the cap isn't specified in the current computed mask but is specified in the
47869 + current level subject, and is lowered in the current level subject, then add
47870 + it to the set of dropped capabilities
47871 + otherwise, add the current level subject's mask to the current computed mask
47872 + */
47873 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47874 + cap_raise(cap_mask, cap);
47875 + if (cap_raised(curracl->cap_lower, cap))
47876 + cap_raise(cap_drop, cap);
47877 + if (cap_raised(curracl->cap_invert_audit, cap))
47878 + cap_raise(cap_audit, cap);
47879 + }
47880 + }
47881 +
47882 + if (!cap_raised(cap_drop, cap)) {
47883 + if (cap_raised(cap_audit, cap))
47884 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47885 + return 1;
47886 + }
47887 +
47888 + curracl = task->acl;
47889 +
47890 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47891 + && cap_raised(cred->cap_effective, cap)) {
47892 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47893 + task->role->roletype, cred->uid,
47894 + cred->gid, task->exec_file ?
47895 + gr_to_filename(task->exec_file->f_path.dentry,
47896 + task->exec_file->f_path.mnt) : curracl->filename,
47897 + curracl->filename, 0UL,
47898 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
47899 + return 1;
47900 + }
47901 +
47902 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
47903 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
47904 + return 0;
47905 +}
47906 +
47907 +int
47908 +gr_is_capable_nolog(const int cap)
47909 +{
47910 + struct acl_subject_label *curracl;
47911 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47912 +
47913 + if (!gr_acl_is_enabled())
47914 + return 1;
47915 +
47916 + curracl = current->acl;
47917 +
47918 + cap_drop = curracl->cap_lower;
47919 + cap_mask = curracl->cap_mask;
47920 +
47921 + while ((curracl = curracl->parent_subject)) {
47922 + /* if the cap isn't specified in the current computed mask but is specified in the
47923 + current level subject, and is lowered in the current level subject, then add
47924 + it to the set of dropped capabilities
47925 + otherwise, add the current level subject's mask to the current computed mask
47926 + */
47927 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47928 + cap_raise(cap_mask, cap);
47929 + if (cap_raised(curracl->cap_lower, cap))
47930 + cap_raise(cap_drop, cap);
47931 + }
47932 + }
47933 +
47934 + if (!cap_raised(cap_drop, cap))
47935 + return 1;
47936 +
47937 + return 0;
47938 +}
47939 +
47940 diff -urNp linux-2.6.32.41/grsecurity/gracl_fs.c linux-2.6.32.41/grsecurity/gracl_fs.c
47941 --- linux-2.6.32.41/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
47942 +++ linux-2.6.32.41/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
47943 @@ -0,0 +1,431 @@
47944 +#include <linux/kernel.h>
47945 +#include <linux/sched.h>
47946 +#include <linux/types.h>
47947 +#include <linux/fs.h>
47948 +#include <linux/file.h>
47949 +#include <linux/stat.h>
47950 +#include <linux/grsecurity.h>
47951 +#include <linux/grinternal.h>
47952 +#include <linux/gracl.h>
47953 +
47954 +__u32
47955 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47956 + const struct vfsmount * mnt)
47957 +{
47958 + __u32 mode;
47959 +
47960 + if (unlikely(!dentry->d_inode))
47961 + return GR_FIND;
47962 +
47963 + mode =
47964 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
47965 +
47966 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
47967 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47968 + return mode;
47969 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
47970 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47971 + return 0;
47972 + } else if (unlikely(!(mode & GR_FIND)))
47973 + return 0;
47974 +
47975 + return GR_FIND;
47976 +}
47977 +
47978 +__u32
47979 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47980 + const int fmode)
47981 +{
47982 + __u32 reqmode = GR_FIND;
47983 + __u32 mode;
47984 +
47985 + if (unlikely(!dentry->d_inode))
47986 + return reqmode;
47987 +
47988 + if (unlikely(fmode & O_APPEND))
47989 + reqmode |= GR_APPEND;
47990 + else if (unlikely(fmode & FMODE_WRITE))
47991 + reqmode |= GR_WRITE;
47992 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47993 + reqmode |= GR_READ;
47994 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
47995 + reqmode &= ~GR_READ;
47996 + mode =
47997 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47998 + mnt);
47999 +
48000 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48001 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48002 + reqmode & GR_READ ? " reading" : "",
48003 + reqmode & GR_WRITE ? " writing" : reqmode &
48004 + GR_APPEND ? " appending" : "");
48005 + return reqmode;
48006 + } else
48007 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48008 + {
48009 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48010 + reqmode & GR_READ ? " reading" : "",
48011 + reqmode & GR_WRITE ? " writing" : reqmode &
48012 + GR_APPEND ? " appending" : "");
48013 + return 0;
48014 + } else if (unlikely((mode & reqmode) != reqmode))
48015 + return 0;
48016 +
48017 + return reqmode;
48018 +}
48019 +
48020 +__u32
48021 +gr_acl_handle_creat(const struct dentry * dentry,
48022 + const struct dentry * p_dentry,
48023 + const struct vfsmount * p_mnt, const int fmode,
48024 + const int imode)
48025 +{
48026 + __u32 reqmode = GR_WRITE | GR_CREATE;
48027 + __u32 mode;
48028 +
48029 + if (unlikely(fmode & O_APPEND))
48030 + reqmode |= GR_APPEND;
48031 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48032 + reqmode |= GR_READ;
48033 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48034 + reqmode |= GR_SETID;
48035 +
48036 + mode =
48037 + gr_check_create(dentry, p_dentry, p_mnt,
48038 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48039 +
48040 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48041 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48042 + reqmode & GR_READ ? " reading" : "",
48043 + reqmode & GR_WRITE ? " writing" : reqmode &
48044 + GR_APPEND ? " appending" : "");
48045 + return reqmode;
48046 + } else
48047 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48048 + {
48049 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48050 + reqmode & GR_READ ? " reading" : "",
48051 + reqmode & GR_WRITE ? " writing" : reqmode &
48052 + GR_APPEND ? " appending" : "");
48053 + return 0;
48054 + } else if (unlikely((mode & reqmode) != reqmode))
48055 + return 0;
48056 +
48057 + return reqmode;
48058 +}
48059 +
48060 +__u32
48061 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48062 + const int fmode)
48063 +{
48064 + __u32 mode, reqmode = GR_FIND;
48065 +
48066 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48067 + reqmode |= GR_EXEC;
48068 + if (fmode & S_IWOTH)
48069 + reqmode |= GR_WRITE;
48070 + if (fmode & S_IROTH)
48071 + reqmode |= GR_READ;
48072 +
48073 + mode =
48074 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48075 + mnt);
48076 +
48077 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48078 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48079 + reqmode & GR_READ ? " reading" : "",
48080 + reqmode & GR_WRITE ? " writing" : "",
48081 + reqmode & GR_EXEC ? " executing" : "");
48082 + return reqmode;
48083 + } else
48084 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48085 + {
48086 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48087 + reqmode & GR_READ ? " reading" : "",
48088 + reqmode & GR_WRITE ? " writing" : "",
48089 + reqmode & GR_EXEC ? " executing" : "");
48090 + return 0;
48091 + } else if (unlikely((mode & reqmode) != reqmode))
48092 + return 0;
48093 +
48094 + return reqmode;
48095 +}
48096 +
48097 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48098 +{
48099 + __u32 mode;
48100 +
48101 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48102 +
48103 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48104 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48105 + return mode;
48106 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48107 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48108 + return 0;
48109 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48110 + return 0;
48111 +
48112 + return (reqmode);
48113 +}
48114 +
48115 +__u32
48116 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48117 +{
48118 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_RMDIR_ACL_MSG);
48119 +}
48120 +
48121 +__u32
48122 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48123 +{
48124 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_UNLINK_ACL_MSG);
48125 +}
48126 +
48127 +__u32
48128 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48129 +{
48130 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48131 +}
48132 +
48133 +__u32
48134 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48135 +{
48136 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48137 +}
48138 +
48139 +__u32
48140 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48141 + mode_t mode)
48142 +{
48143 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48144 + return 1;
48145 +
48146 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48147 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48148 + GR_FCHMOD_ACL_MSG);
48149 + } else {
48150 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48151 + }
48152 +}
48153 +
48154 +__u32
48155 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48156 + mode_t mode)
48157 +{
48158 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48159 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48160 + GR_CHMOD_ACL_MSG);
48161 + } else {
48162 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48163 + }
48164 +}
48165 +
48166 +__u32
48167 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48168 +{
48169 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48170 +}
48171 +
48172 +__u32
48173 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48174 +{
48175 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48176 +}
48177 +
48178 +__u32
48179 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48180 +{
48181 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48182 +}
48183 +
48184 +__u32
48185 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48186 +{
48187 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48188 + GR_UNIXCONNECT_ACL_MSG);
48189 +}
48190 +
48191 +/* hardlinks require at minimum the create permission;
48192 +   any additional privilege required depends on the
48193 +   privileges of the file being linked to
48194 +*/
48195 +__u32
48196 +gr_acl_handle_link(const struct dentry * new_dentry,
48197 + const struct dentry * parent_dentry,
48198 + const struct vfsmount * parent_mnt,
48199 + const struct dentry * old_dentry,
48200 + const struct vfsmount * old_mnt, const char *to)
48201 +{
48202 + __u32 mode;
48203 + __u32 needmode = GR_CREATE | GR_LINK;
48204 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48205 +
48206 + mode =
48207 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48208 + old_mnt);
48209 +
48210 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48211 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48212 + return mode;
48213 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48214 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48215 + return 0;
48216 + } else if (unlikely((mode & needmode) != needmode))
48217 + return 0;
48218 +
48219 + return 1;
48220 +}
48221 +
48222 +__u32
48223 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48224 + const struct dentry * parent_dentry,
48225 + const struct vfsmount * parent_mnt, const char *from)
48226 +{
48227 + __u32 needmode = GR_WRITE | GR_CREATE;
48228 + __u32 mode;
48229 +
48230 + mode =
48231 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48232 + GR_CREATE | GR_AUDIT_CREATE |
48233 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48234 +
48235 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48236 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48237 + return mode;
48238 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48239 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48240 + return 0;
48241 + } else if (unlikely((mode & needmode) != needmode))
48242 + return 0;
48243 +
48244 + return (GR_WRITE | GR_CREATE);
48245 +}
48246 +
48247 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48248 +{
48249 + __u32 mode;
48250 +
48251 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48252 +
48253 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48254 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48255 + return mode;
48256 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48257 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48258 + return 0;
48259 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48260 + return 0;
48261 +
48262 + return (reqmode);
48263 +}
48264 +
48265 +__u32
48266 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48267 + const struct dentry * parent_dentry,
48268 + const struct vfsmount * parent_mnt,
48269 + const int mode)
48270 +{
48271 + __u32 reqmode = GR_WRITE | GR_CREATE;
48272 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48273 + reqmode |= GR_SETID;
48274 +
48275 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48276 + reqmode, GR_MKNOD_ACL_MSG);
48277 +}
48278 +
48279 +__u32
48280 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48281 + const struct dentry *parent_dentry,
48282 + const struct vfsmount *parent_mnt)
48283 +{
48284 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48285 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48286 +}
48287 +
48288 +#define RENAME_CHECK_SUCCESS(old, new) \
48289 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48290 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48291 +
48292 +int
48293 +gr_acl_handle_rename(struct dentry *new_dentry,
48294 + struct dentry *parent_dentry,
48295 + const struct vfsmount *parent_mnt,
48296 + struct dentry *old_dentry,
48297 + struct inode *old_parent_inode,
48298 + struct vfsmount *old_mnt, const char *newname)
48299 +{
48300 + __u32 comp1, comp2;
48301 + int error = 0;
48302 +
48303 + if (unlikely(!gr_acl_is_enabled()))
48304 + return 0;
48305 +
48306 + if (!new_dentry->d_inode) {
48307 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48308 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48309 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48310 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48311 + GR_DELETE | GR_AUDIT_DELETE |
48312 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48313 + GR_SUPPRESS, old_mnt);
48314 + } else {
48315 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48316 + GR_CREATE | GR_DELETE |
48317 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48318 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48319 + GR_SUPPRESS, parent_mnt);
48320 + comp2 =
48321 + gr_search_file(old_dentry,
48322 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48323 + GR_DELETE | GR_AUDIT_DELETE |
48324 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48325 + }
48326 +
48327 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48328 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48329 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48330 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48331 + && !(comp2 & GR_SUPPRESS)) {
48332 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48333 + error = -EACCES;
48334 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48335 + error = -EACCES;
48336 +
48337 + return error;
48338 +}
48339 +
48340 +void
48341 +gr_acl_handle_exit(void)
48342 +{
48343 + u16 id;
48344 + char *rolename;
48345 + struct file *exec_file;
48346 +
48347 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48348 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48349 + id = current->acl_role_id;
48350 + rolename = current->role->rolename;
48351 + gr_set_acls(1);
48352 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48353 + }
48354 +
48355 + write_lock(&grsec_exec_file_lock);
48356 + exec_file = current->exec_file;
48357 + current->exec_file = NULL;
48358 + write_unlock(&grsec_exec_file_lock);
48359 +
48360 + if (exec_file)
48361 + fput(exec_file);
48362 +}
48363 +
48364 +int
48365 +gr_acl_handle_procpidmem(const struct task_struct *task)
48366 +{
48367 + if (unlikely(!gr_acl_is_enabled()))
48368 + return 0;
48369 +
48370 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48371 + return -EACCES;
48372 +
48373 + return 0;
48374 +}
48375 diff -urNp linux-2.6.32.41/grsecurity/gracl_ip.c linux-2.6.32.41/grsecurity/gracl_ip.c
48376 --- linux-2.6.32.41/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48377 +++ linux-2.6.32.41/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48378 @@ -0,0 +1,382 @@
48379 +#include <linux/kernel.h>
48380 +#include <asm/uaccess.h>
48381 +#include <asm/errno.h>
48382 +#include <net/sock.h>
48383 +#include <linux/file.h>
48384 +#include <linux/fs.h>
48385 +#include <linux/net.h>
48386 +#include <linux/in.h>
48387 +#include <linux/skbuff.h>
48388 +#include <linux/ip.h>
48389 +#include <linux/udp.h>
48390 +#include <linux/smp_lock.h>
48391 +#include <linux/types.h>
48392 +#include <linux/sched.h>
48393 +#include <linux/netdevice.h>
48394 +#include <linux/inetdevice.h>
48395 +#include <linux/gracl.h>
48396 +#include <linux/grsecurity.h>
48397 +#include <linux/grinternal.h>
48398 +
48399 +#define GR_BIND 0x01
48400 +#define GR_CONNECT 0x02
48401 +#define GR_INVERT 0x04
48402 +#define GR_BINDOVERRIDE 0x08
48403 +#define GR_CONNECTOVERRIDE 0x10
48404 +#define GR_SOCK_FAMILY 0x20
48405 +
48406 +static const char * gr_protocols[IPPROTO_MAX] = {
48407 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48408 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48409 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48410 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48411 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48412 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48413 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48414 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48415 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48416 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48417 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48418 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48419 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48420 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48421 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48422 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48423 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48424 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48425 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48426 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48427 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48428 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48429 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48430 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48431 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48432 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48433 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48434 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48435 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48436 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48437 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48438 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48439 + };
48440 +
48441 +static const char * gr_socktypes[SOCK_MAX] = {
48442 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48443 + "unknown:7", "unknown:8", "unknown:9", "packet"
48444 + };
48445 +
48446 +static const char * gr_sockfamilies[AF_MAX+1] = {
48447 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48448 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48449 +	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
48450 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48451 + };
48452 +
48453 +const char *
48454 +gr_proto_to_name(unsigned char proto)
48455 +{
48456 + return gr_protocols[proto];
48457 +}
48458 +
48459 +const char *
48460 +gr_socktype_to_name(unsigned char type)
48461 +{
48462 + return gr_socktypes[type];
48463 +}
48464 +
48465 +const char *
48466 +gr_sockfamily_to_name(unsigned char family)
48467 +{
48468 + return gr_sockfamilies[family];
48469 +}
48470 +
48471 +int
48472 +gr_search_socket(const int domain, const int type, const int protocol)
48473 +{
48474 + struct acl_subject_label *curr;
48475 + const struct cred *cred = current_cred();
48476 +
48477 + if (unlikely(!gr_acl_is_enabled()))
48478 + goto exit;
48479 +
48480 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48481 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48482 + goto exit; // let the kernel handle it
48483 +
48484 + curr = current->acl;
48485 +
48486 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48487 +		/* the family is allowed; if this is PF_INET, allow it only if
48488 +		   the extra sock type/protocol checks pass */
48489 + if (domain == PF_INET)
48490 + goto inet_check;
48491 + goto exit;
48492 + } else {
48493 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48494 + __u32 fakeip = 0;
48495 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48496 + current->role->roletype, cred->uid,
48497 + cred->gid, current->exec_file ?
48498 + gr_to_filename(current->exec_file->f_path.dentry,
48499 + current->exec_file->f_path.mnt) :
48500 + curr->filename, curr->filename,
48501 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48502 + &current->signal->saved_ip);
48503 + goto exit;
48504 + }
48505 + goto exit_fail;
48506 + }
48507 +
48508 +inet_check:
48509 + /* the rest of this checking is for IPv4 only */
48510 + if (!curr->ips)
48511 + goto exit;
48512 +
48513 + if ((curr->ip_type & (1 << type)) &&
48514 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48515 + goto exit;
48516 +
48517 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48518 +		/* we don't place ACLs on raw sockets, and sometimes
48519 +		   dgram/ip sockets are opened for ioctl and not
48520 +		   bind/connect, so we'll fake a bind learn log */
48521 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48522 + __u32 fakeip = 0;
48523 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48524 + current->role->roletype, cred->uid,
48525 + cred->gid, current->exec_file ?
48526 + gr_to_filename(current->exec_file->f_path.dentry,
48527 + current->exec_file->f_path.mnt) :
48528 + curr->filename, curr->filename,
48529 + &fakeip, 0, type,
48530 + protocol, GR_CONNECT, &current->signal->saved_ip);
48531 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48532 + __u32 fakeip = 0;
48533 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48534 + current->role->roletype, cred->uid,
48535 + cred->gid, current->exec_file ?
48536 + gr_to_filename(current->exec_file->f_path.dentry,
48537 + current->exec_file->f_path.mnt) :
48538 + curr->filename, curr->filename,
48539 + &fakeip, 0, type,
48540 + protocol, GR_BIND, &current->signal->saved_ip);
48541 + }
48542 + /* we'll log when they use connect or bind */
48543 + goto exit;
48544 + }
48545 +
48546 +exit_fail:
48547 + if (domain == PF_INET)
48548 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48549 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48550 + else
48551 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48552 + gr_socktype_to_name(type), protocol);
48553 +
48554 + return 0;
48555 +exit:
48556 + return 1;
48557 +}
48558 +
48559 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48560 +{
48561 + if ((ip->mode & mode) &&
48562 + (ip_port >= ip->low) &&
48563 + (ip_port <= ip->high) &&
48564 + ((ntohl(ip_addr) & our_netmask) ==
48565 + (ntohl(our_addr) & our_netmask))
48566 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48567 + && (ip->type & (1 << type))) {
48568 + if (ip->mode & GR_INVERT)
48569 + return 2; // specifically denied
48570 + else
48571 + return 1; // allowed
48572 + }
48573 +
48574 + return 0; // not specifically allowed, may continue parsing
48575 +}
48576 +
48577 +static int
48578 +gr_search_connectbind(const int full_mode, struct sock *sk,
48579 + struct sockaddr_in *addr, const int type)
48580 +{
48581 + char iface[IFNAMSIZ] = {0};
48582 + struct acl_subject_label *curr;
48583 + struct acl_ip_label *ip;
48584 + struct inet_sock *isk;
48585 + struct net_device *dev;
48586 + struct in_device *idev;
48587 + unsigned long i;
48588 + int ret;
48589 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48590 + __u32 ip_addr = 0;
48591 + __u32 our_addr;
48592 + __u32 our_netmask;
48593 + char *p;
48594 + __u16 ip_port = 0;
48595 + const struct cred *cred = current_cred();
48596 +
48597 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48598 + return 0;
48599 +
48600 + curr = current->acl;
48601 + isk = inet_sk(sk);
48602 +
48603 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48604 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48605 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48606 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48607 + struct sockaddr_in saddr;
48608 + int err;
48609 +
48610 + saddr.sin_family = AF_INET;
48611 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48612 + saddr.sin_port = isk->sport;
48613 +
48614 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48615 + if (err)
48616 + return err;
48617 +
48618 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48619 + if (err)
48620 + return err;
48621 + }
48622 +
48623 + if (!curr->ips)
48624 + return 0;
48625 +
48626 + ip_addr = addr->sin_addr.s_addr;
48627 + ip_port = ntohs(addr->sin_port);
48628 +
48629 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48630 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48631 + current->role->roletype, cred->uid,
48632 + cred->gid, current->exec_file ?
48633 + gr_to_filename(current->exec_file->f_path.dentry,
48634 + current->exec_file->f_path.mnt) :
48635 + curr->filename, curr->filename,
48636 + &ip_addr, ip_port, type,
48637 + sk->sk_protocol, mode, &current->signal->saved_ip);
48638 + return 0;
48639 + }
48640 +
48641 + for (i = 0; i < curr->ip_num; i++) {
48642 + ip = *(curr->ips + i);
48643 + if (ip->iface != NULL) {
48644 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48645 + p = strchr(iface, ':');
48646 + if (p != NULL)
48647 + *p = '\0';
48648 + dev = dev_get_by_name(sock_net(sk), iface);
48649 + if (dev == NULL)
48650 + continue;
48651 + idev = in_dev_get(dev);
48652 + if (idev == NULL) {
48653 + dev_put(dev);
48654 + continue;
48655 + }
48656 + rcu_read_lock();
48657 + for_ifa(idev) {
48658 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48659 + our_addr = ifa->ifa_address;
48660 + our_netmask = 0xffffffff;
48661 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48662 + if (ret == 1) {
48663 + rcu_read_unlock();
48664 + in_dev_put(idev);
48665 + dev_put(dev);
48666 + return 0;
48667 + } else if (ret == 2) {
48668 + rcu_read_unlock();
48669 + in_dev_put(idev);
48670 + dev_put(dev);
48671 + goto denied;
48672 + }
48673 + }
48674 + } endfor_ifa(idev);
48675 + rcu_read_unlock();
48676 + in_dev_put(idev);
48677 + dev_put(dev);
48678 + } else {
48679 + our_addr = ip->addr;
48680 + our_netmask = ip->netmask;
48681 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48682 + if (ret == 1)
48683 + return 0;
48684 + else if (ret == 2)
48685 + goto denied;
48686 + }
48687 + }
48688 +
48689 +denied:
48690 + if (mode == GR_BIND)
48691 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48692 + else if (mode == GR_CONNECT)
48693 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48694 +
48695 + return -EACCES;
48696 +}
48697 +
48698 +int
48699 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48700 +{
48701 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48702 +}
48703 +
48704 +int
48705 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48706 +{
48707 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48708 +}
48709 +
48710 +int gr_search_listen(struct socket *sock)
48711 +{
48712 + struct sock *sk = sock->sk;
48713 + struct sockaddr_in addr;
48714 +
48715 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48716 + addr.sin_port = inet_sk(sk)->sport;
48717 +
48718 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48719 +}
48720 +
48721 +int gr_search_accept(struct socket *sock)
48722 +{
48723 + struct sock *sk = sock->sk;
48724 + struct sockaddr_in addr;
48725 +
48726 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48727 + addr.sin_port = inet_sk(sk)->sport;
48728 +
48729 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48730 +}
48731 +
48732 +int
48733 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48734 +{
48735 + if (addr)
48736 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48737 + else {
48738 + struct sockaddr_in sin;
48739 + const struct inet_sock *inet = inet_sk(sk);
48740 +
48741 + sin.sin_addr.s_addr = inet->daddr;
48742 + sin.sin_port = inet->dport;
48743 +
48744 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48745 + }
48746 +}
48747 +
48748 +int
48749 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48750 +{
48751 + struct sockaddr_in sin;
48752 +
48753 + if (unlikely(skb->len < sizeof (struct udphdr)))
48754 + return 0; // skip this packet
48755 +
48756 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48757 + sin.sin_port = udp_hdr(skb)->source;
48758 +
48759 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48760 +}
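
check_ip_policy() above reduces each rule to a port-range test plus a masked subnet comparison on host-order addresses. A minimal standalone userspace sketch of that comparison follows (not part of the patch; the sample subnet, addresses and port range are made up for illustration):

#include <stdio.h>
#include <arpa/inet.h>

/* simplified form of the policy test: port within range and address inside the subnet */
static int addr_port_match(unsigned int ip_addr, unsigned short ip_port,
			   unsigned int our_addr, unsigned int our_netmask,
			   unsigned short low, unsigned short high)
{
	return ip_port >= low && ip_port <= high &&
	       ((ntohl(ip_addr) & our_netmask) == (ntohl(our_addr) & our_netmask));
}

int main(void)
{
	unsigned int policy_addr = inet_addr("192.168.1.0");	/* network byte order */
	unsigned int netmask = 0xffffff00;			/* /24 in host byte order */

	/* 1: same /24 subnet, port 80 inside 1-1024 */
	printf("%d\n", addr_port_match(inet_addr("192.168.1.42"), 80, policy_addr, netmask, 1, 1024));
	/* 0: different subnet */
	printf("%d\n", addr_port_match(inet_addr("10.0.0.1"), 80, policy_addr, netmask, 1, 1024));
	return 0;
}
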
48761 diff -urNp linux-2.6.32.41/grsecurity/gracl_learn.c linux-2.6.32.41/grsecurity/gracl_learn.c
48762 --- linux-2.6.32.41/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48763 +++ linux-2.6.32.41/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48764 @@ -0,0 +1,211 @@
48765 +#include <linux/kernel.h>
48766 +#include <linux/mm.h>
48767 +#include <linux/sched.h>
48768 +#include <linux/poll.h>
48769 +#include <linux/smp_lock.h>
48770 +#include <linux/string.h>
48771 +#include <linux/file.h>
48772 +#include <linux/types.h>
48773 +#include <linux/vmalloc.h>
48774 +#include <linux/grinternal.h>
48775 +
48776 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48777 + size_t count, loff_t *ppos);
48778 +extern int gr_acl_is_enabled(void);
48779 +
48780 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48781 +static int gr_learn_attached;
48782 +
48783 +/* use a 512k buffer */
48784 +#define LEARN_BUFFER_SIZE (512 * 1024)
48785 +
48786 +static DEFINE_SPINLOCK(gr_learn_lock);
48787 +static DEFINE_MUTEX(gr_learn_user_mutex);
48788 +
48789 +/* we need to maintain two buffers, so that the kernel context reading on
48790 +   behalf of grlearn can hold a mutex around the userspace copy, while the
48791 +   other kernel contexts use a spinlock when copying into the buffer, since they cannot sleep
48792 +*/
48793 +static char *learn_buffer;
48794 +static char *learn_buffer_user;
48795 +static int learn_buffer_len;
48796 +static int learn_buffer_user_len;
48797 +
48798 +static ssize_t
48799 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48800 +{
48801 + DECLARE_WAITQUEUE(wait, current);
48802 + ssize_t retval = 0;
48803 +
48804 + add_wait_queue(&learn_wait, &wait);
48805 + set_current_state(TASK_INTERRUPTIBLE);
48806 + do {
48807 + mutex_lock(&gr_learn_user_mutex);
48808 + spin_lock(&gr_learn_lock);
48809 + if (learn_buffer_len)
48810 + break;
48811 + spin_unlock(&gr_learn_lock);
48812 + mutex_unlock(&gr_learn_user_mutex);
48813 + if (file->f_flags & O_NONBLOCK) {
48814 + retval = -EAGAIN;
48815 + goto out;
48816 + }
48817 + if (signal_pending(current)) {
48818 + retval = -ERESTARTSYS;
48819 + goto out;
48820 + }
48821 +
48822 + schedule();
48823 + } while (1);
48824 +
48825 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48826 + learn_buffer_user_len = learn_buffer_len;
48827 + retval = learn_buffer_len;
48828 + learn_buffer_len = 0;
48829 +
48830 + spin_unlock(&gr_learn_lock);
48831 +
48832 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48833 + retval = -EFAULT;
48834 +
48835 + mutex_unlock(&gr_learn_user_mutex);
48836 +out:
48837 + set_current_state(TASK_RUNNING);
48838 + remove_wait_queue(&learn_wait, &wait);
48839 + return retval;
48840 +}
48841 +
48842 +static unsigned int
48843 +poll_learn(struct file * file, poll_table * wait)
48844 +{
48845 + poll_wait(file, &learn_wait, wait);
48846 +
48847 + if (learn_buffer_len)
48848 + return (POLLIN | POLLRDNORM);
48849 +
48850 + return 0;
48851 +}
48852 +
48853 +void
48854 +gr_clear_learn_entries(void)
48855 +{
48856 + char *tmp;
48857 +
48858 + mutex_lock(&gr_learn_user_mutex);
48859 + if (learn_buffer != NULL) {
48860 + spin_lock(&gr_learn_lock);
48861 + tmp = learn_buffer;
48862 + learn_buffer = NULL;
48863 + spin_unlock(&gr_learn_lock);
48864 +		vfree(tmp);
48865 + }
48866 + if (learn_buffer_user != NULL) {
48867 + vfree(learn_buffer_user);
48868 + learn_buffer_user = NULL;
48869 + }
48870 + learn_buffer_len = 0;
48871 + mutex_unlock(&gr_learn_user_mutex);
48872 +
48873 + return;
48874 +}
48875 +
48876 +void
48877 +gr_add_learn_entry(const char *fmt, ...)
48878 +{
48879 + va_list args;
48880 + unsigned int len;
48881 +
48882 + if (!gr_learn_attached)
48883 + return;
48884 +
48885 + spin_lock(&gr_learn_lock);
48886 +
48887 + /* leave a gap at the end so we know when it's "full" but don't have to
48888 + compute the exact length of the string we're trying to append
48889 + */
48890 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48891 + spin_unlock(&gr_learn_lock);
48892 + wake_up_interruptible(&learn_wait);
48893 + return;
48894 + }
48895 + if (learn_buffer == NULL) {
48896 + spin_unlock(&gr_learn_lock);
48897 + return;
48898 + }
48899 +
48900 + va_start(args, fmt);
48901 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
48902 + va_end(args);
48903 +
48904 + learn_buffer_len += len + 1;
48905 +
48906 + spin_unlock(&gr_learn_lock);
48907 + wake_up_interruptible(&learn_wait);
48908 +
48909 + return;
48910 +}
48911 +
48912 +static int
48913 +open_learn(struct inode *inode, struct file *file)
48914 +{
48915 + if (file->f_mode & FMODE_READ && gr_learn_attached)
48916 + return -EBUSY;
48917 + if (file->f_mode & FMODE_READ) {
48918 + int retval = 0;
48919 + mutex_lock(&gr_learn_user_mutex);
48920 + if (learn_buffer == NULL)
48921 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
48922 + if (learn_buffer_user == NULL)
48923 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
48924 + if (learn_buffer == NULL) {
48925 + retval = -ENOMEM;
48926 + goto out_error;
48927 + }
48928 + if (learn_buffer_user == NULL) {
48929 + retval = -ENOMEM;
48930 + goto out_error;
48931 + }
48932 + learn_buffer_len = 0;
48933 + learn_buffer_user_len = 0;
48934 + gr_learn_attached = 1;
48935 +out_error:
48936 + mutex_unlock(&gr_learn_user_mutex);
48937 + return retval;
48938 + }
48939 + return 0;
48940 +}
48941 +
48942 +static int
48943 +close_learn(struct inode *inode, struct file *file)
48944 +{
48945 + char *tmp;
48946 +
48947 + if (file->f_mode & FMODE_READ) {
48948 + mutex_lock(&gr_learn_user_mutex);
48949 + if (learn_buffer != NULL) {
48950 + spin_lock(&gr_learn_lock);
48951 + tmp = learn_buffer;
48952 + learn_buffer = NULL;
48953 + spin_unlock(&gr_learn_lock);
48954 + vfree(tmp);
48955 + }
48956 + if (learn_buffer_user != NULL) {
48957 + vfree(learn_buffer_user);
48958 + learn_buffer_user = NULL;
48959 + }
48960 + learn_buffer_len = 0;
48961 + learn_buffer_user_len = 0;
48962 + gr_learn_attached = 0;
48963 + mutex_unlock(&gr_learn_user_mutex);
48964 + }
48965 +
48966 + return 0;
48967 +}
48968 +
48969 +const struct file_operations grsec_fops = {
48970 + .read = read_learn,
48971 + .write = write_grsec_handler,
48972 + .open = open_learn,
48973 + .release = close_learn,
48974 + .poll = poll_learn,
48975 +};
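
The grsec_fops above back the learning stream: gr_add_learn_entry() queues NUL-terminated records, poll_learn() signals POLLIN, and read_learn() hands the accumulated buffer to the single attached reader. A minimal userspace consumer could look like the sketch below; the /dev/grsec path is an assumption used only for illustration:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	char buf[16384];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/grsec", O_RDONLY);	/* assumed device node; open_learn() allows a single reader */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* poll_learn() reports POLLIN once gr_add_learn_entry() has queued data */
	while (poll(&pfd, 1, -1) > 0) {
		n = read(fd, buf, sizeof(buf));	/* read_learn() hands over and resets the buffer */
		if (n <= 0)
			break;
		/* the buffer holds NUL-separated learn records */
		fwrite(buf, 1, (size_t)n, stdout);
	}
	close(fd);
	return 0;
}
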
48976 diff -urNp linux-2.6.32.41/grsecurity/gracl_res.c linux-2.6.32.41/grsecurity/gracl_res.c
48977 --- linux-2.6.32.41/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
48978 +++ linux-2.6.32.41/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
48979 @@ -0,0 +1,67 @@
48980 +#include <linux/kernel.h>
48981 +#include <linux/sched.h>
48982 +#include <linux/gracl.h>
48983 +#include <linux/grinternal.h>
48984 +
48985 +static const char *restab_log[] = {
48986 + [RLIMIT_CPU] = "RLIMIT_CPU",
48987 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
48988 + [RLIMIT_DATA] = "RLIMIT_DATA",
48989 + [RLIMIT_STACK] = "RLIMIT_STACK",
48990 + [RLIMIT_CORE] = "RLIMIT_CORE",
48991 + [RLIMIT_RSS] = "RLIMIT_RSS",
48992 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
48993 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
48994 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
48995 + [RLIMIT_AS] = "RLIMIT_AS",
48996 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
48997 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
48998 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
48999 + [RLIMIT_NICE] = "RLIMIT_NICE",
49000 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49001 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49002 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49003 +};
49004 +
49005 +void
49006 +gr_log_resource(const struct task_struct *task,
49007 + const int res, const unsigned long wanted, const int gt)
49008 +{
49009 + const struct cred *cred;
49010 + unsigned long rlim;
49011 +
49012 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49013 + return;
49014 +
49015 + // not yet supported resource
49016 + if (unlikely(!restab_log[res]))
49017 + return;
49018 +
49019 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49020 + rlim = task->signal->rlim[res].rlim_max;
49021 + else
49022 + rlim = task->signal->rlim[res].rlim_cur;
49023 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49024 + return;
49025 +
49026 + rcu_read_lock();
49027 + cred = __task_cred(task);
49028 +
49029 + if (res == RLIMIT_NPROC &&
49030 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49031 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49032 + goto out_rcu_unlock;
49033 + else if (res == RLIMIT_MEMLOCK &&
49034 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49035 + goto out_rcu_unlock;
49036 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49037 + goto out_rcu_unlock;
49038 + rcu_read_unlock();
49039 +
49040 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49041 +
49042 + return;
49043 +out_rcu_unlock:
49044 + rcu_read_unlock();
49045 + return;
49046 +}
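
gr_log_resource() above only emits a log once the request actually exceeds the applicable limit; the gt flag selects whether matching the limit exactly is still allowed. A standalone restatement of that test with made-up values (not part of the patch):

#include <stdio.h>
#include <sys/resource.h>

/* gt != 0 allows the request to equal the limit; gt == 0 requires strictly below */
static int within_limit(rlim_t wanted, rlim_t rlim, int gt)
{
	return rlim == RLIM_INFINITY || (gt && wanted <= rlim) || (!gt && wanted < rlim);
}

int main(void)
{
	printf("%d\n", within_limit(100, RLIM_INFINITY, 0));	/* 1: unlimited, never logged */
	printf("%d\n", within_limit(512, 512, 1));		/* 1: equal is fine when gt is set */
	printf("%d\n", within_limit(512, 512, 0));		/* 0: would trigger the resource log */
	return 0;
}
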
49047 diff -urNp linux-2.6.32.41/grsecurity/gracl_segv.c linux-2.6.32.41/grsecurity/gracl_segv.c
49048 --- linux-2.6.32.41/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49049 +++ linux-2.6.32.41/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49050 @@ -0,0 +1,284 @@
49051 +#include <linux/kernel.h>
49052 +#include <linux/mm.h>
49053 +#include <asm/uaccess.h>
49054 +#include <asm/errno.h>
49055 +#include <asm/mman.h>
49056 +#include <net/sock.h>
49057 +#include <linux/file.h>
49058 +#include <linux/fs.h>
49059 +#include <linux/net.h>
49060 +#include <linux/in.h>
49061 +#include <linux/smp_lock.h>
49062 +#include <linux/slab.h>
49063 +#include <linux/types.h>
49064 +#include <linux/sched.h>
49065 +#include <linux/timer.h>
49066 +#include <linux/gracl.h>
49067 +#include <linux/grsecurity.h>
49068 +#include <linux/grinternal.h>
49069 +
49070 +static struct crash_uid *uid_set;
49071 +static unsigned short uid_used;
49072 +static DEFINE_SPINLOCK(gr_uid_lock);
49073 +extern rwlock_t gr_inode_lock;
49074 +extern struct acl_subject_label *
49075 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49076 + struct acl_role_label *role);
49077 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49078 +
49079 +int
49080 +gr_init_uidset(void)
49081 +{
49082 + uid_set =
49083 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49084 + uid_used = 0;
49085 +
49086 + return uid_set ? 1 : 0;
49087 +}
49088 +
49089 +void
49090 +gr_free_uidset(void)
49091 +{
49092 + if (uid_set)
49093 + kfree(uid_set);
49094 +
49095 + return;
49096 +}
49097 +
49098 +int
49099 +gr_find_uid(const uid_t uid)
49100 +{
49101 + struct crash_uid *tmp = uid_set;
49102 + uid_t buid;
49103 + int low = 0, high = uid_used - 1, mid;
49104 +
49105 + while (high >= low) {
49106 + mid = (low + high) >> 1;
49107 + buid = tmp[mid].uid;
49108 + if (buid == uid)
49109 + return mid;
49110 + if (buid > uid)
49111 + high = mid - 1;
49112 + if (buid < uid)
49113 + low = mid + 1;
49114 + }
49115 +
49116 + return -1;
49117 +}
49118 +
49119 +static __inline__ void
49120 +gr_insertsort(void)
49121 +{
49122 + unsigned short i, j;
49123 + struct crash_uid index;
49124 +
49125 + for (i = 1; i < uid_used; i++) {
49126 + index = uid_set[i];
49127 + j = i;
49128 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49129 + uid_set[j] = uid_set[j - 1];
49130 + j--;
49131 + }
49132 + uid_set[j] = index;
49133 + }
49134 +
49135 + return;
49136 +}
49137 +
49138 +static __inline__ void
49139 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49140 +{
49141 + int loc;
49142 +
49143 + if (uid_used == GR_UIDTABLE_MAX)
49144 + return;
49145 +
49146 + loc = gr_find_uid(uid);
49147 +
49148 + if (loc >= 0) {
49149 + uid_set[loc].expires = expires;
49150 + return;
49151 + }
49152 +
49153 + uid_set[uid_used].uid = uid;
49154 + uid_set[uid_used].expires = expires;
49155 + uid_used++;
49156 +
49157 + gr_insertsort();
49158 +
49159 + return;
49160 +}
49161 +
49162 +void
49163 +gr_remove_uid(const unsigned short loc)
49164 +{
49165 + unsigned short i;
49166 +
49167 + for (i = loc + 1; i < uid_used; i++)
49168 + uid_set[i - 1] = uid_set[i];
49169 +
49170 + uid_used--;
49171 +
49172 + return;
49173 +}
49174 +
49175 +int
49176 +gr_check_crash_uid(const uid_t uid)
49177 +{
49178 + int loc;
49179 + int ret = 0;
49180 +
49181 + if (unlikely(!gr_acl_is_enabled()))
49182 + return 0;
49183 +
49184 + spin_lock(&gr_uid_lock);
49185 + loc = gr_find_uid(uid);
49186 +
49187 + if (loc < 0)
49188 + goto out_unlock;
49189 +
49190 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49191 + gr_remove_uid(loc);
49192 + else
49193 + ret = 1;
49194 +
49195 +out_unlock:
49196 + spin_unlock(&gr_uid_lock);
49197 + return ret;
49198 +}
49199 +
49200 +static __inline__ int
49201 +proc_is_setxid(const struct cred *cred)
49202 +{
49203 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49204 + cred->uid != cred->fsuid)
49205 + return 1;
49206 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49207 + cred->gid != cred->fsgid)
49208 + return 1;
49209 +
49210 + return 0;
49211 +}
49212 +
49213 +void
49214 +gr_handle_crash(struct task_struct *task, const int sig)
49215 +{
49216 + struct acl_subject_label *curr;
49217 + struct acl_subject_label *curr2;
49218 + struct task_struct *tsk, *tsk2;
49219 + const struct cred *cred;
49220 + const struct cred *cred2;
49221 +
49222 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49223 + return;
49224 +
49225 + if (unlikely(!gr_acl_is_enabled()))
49226 + return;
49227 +
49228 + curr = task->acl;
49229 +
49230 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49231 + return;
49232 +
49233 + if (time_before_eq(curr->expires, get_seconds())) {
49234 + curr->expires = 0;
49235 + curr->crashes = 0;
49236 + }
49237 +
49238 + curr->crashes++;
49239 +
49240 + if (!curr->expires)
49241 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49242 +
49243 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49244 + time_after(curr->expires, get_seconds())) {
49245 + rcu_read_lock();
49246 + cred = __task_cred(task);
49247 + if (cred->uid && proc_is_setxid(cred)) {
49248 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49249 + spin_lock(&gr_uid_lock);
49250 + gr_insert_uid(cred->uid, curr->expires);
49251 + spin_unlock(&gr_uid_lock);
49252 + curr->expires = 0;
49253 + curr->crashes = 0;
49254 + read_lock(&tasklist_lock);
49255 + do_each_thread(tsk2, tsk) {
49256 + cred2 = __task_cred(tsk);
49257 + if (tsk != task && cred2->uid == cred->uid)
49258 + gr_fake_force_sig(SIGKILL, tsk);
49259 + } while_each_thread(tsk2, tsk);
49260 + read_unlock(&tasklist_lock);
49261 + } else {
49262 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49263 + read_lock(&tasklist_lock);
49264 + do_each_thread(tsk2, tsk) {
49265 + if (likely(tsk != task)) {
49266 + curr2 = tsk->acl;
49267 +
49268 + if (curr2->device == curr->device &&
49269 + curr2->inode == curr->inode)
49270 + gr_fake_force_sig(SIGKILL, tsk);
49271 + }
49272 + } while_each_thread(tsk2, tsk);
49273 + read_unlock(&tasklist_lock);
49274 + }
49275 + rcu_read_unlock();
49276 + }
49277 +
49278 + return;
49279 +}
49280 +
49281 +int
49282 +gr_check_crash_exec(const struct file *filp)
49283 +{
49284 + struct acl_subject_label *curr;
49285 +
49286 + if (unlikely(!gr_acl_is_enabled()))
49287 + return 0;
49288 +
49289 + read_lock(&gr_inode_lock);
49290 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49291 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49292 + current->role);
49293 + read_unlock(&gr_inode_lock);
49294 +
49295 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49296 + (!curr->crashes && !curr->expires))
49297 + return 0;
49298 +
49299 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49300 + time_after(curr->expires, get_seconds()))
49301 + return 1;
49302 + else if (time_before_eq(curr->expires, get_seconds())) {
49303 + curr->crashes = 0;
49304 + curr->expires = 0;
49305 + }
49306 +
49307 + return 0;
49308 +}
49309 +
49310 +void
49311 +gr_handle_alertkill(struct task_struct *task)
49312 +{
49313 + struct acl_subject_label *curracl;
49314 + __u32 curr_ip;
49315 + struct task_struct *p, *p2;
49316 +
49317 + if (unlikely(!gr_acl_is_enabled()))
49318 + return;
49319 +
49320 + curracl = task->acl;
49321 + curr_ip = task->signal->curr_ip;
49322 +
49323 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49324 + read_lock(&tasklist_lock);
49325 + do_each_thread(p2, p) {
49326 + if (p->signal->curr_ip == curr_ip)
49327 + gr_fake_force_sig(SIGKILL, p);
49328 + } while_each_thread(p2, p);
49329 + read_unlock(&tasklist_lock);
49330 + } else if (curracl->mode & GR_KILLPROC)
49331 + gr_fake_force_sig(SIGKILL, task);
49332 +
49333 + return;
49334 +}
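
gracl_segv.c above keeps the crashed-uid table sorted so gr_check_crash_uid() can binary-search it, and gr_insert_uid() either refreshes an existing entry's expiry or appends and re-sorts. A standalone userspace sketch of that bookkeeping (not part of the patch; table size and uids are arbitrary):

#include <stdio.h>

struct crash_uid {
	unsigned int uid;
	unsigned long expires;
};

#define UIDTABLE_MAX 8	/* stands in for GR_UIDTABLE_MAX */

static struct crash_uid uid_set[UIDTABLE_MAX];
static unsigned short uid_used;

/* binary search over the sorted table, as in gr_find_uid() */
static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* refresh an existing entry or append and re-sort, as in gr_insert_uid() */
static void insert_uid(unsigned int uid, unsigned long expires)
{
	struct crash_uid tmp;
	unsigned short i, j;
	int loc;

	if (uid_used == UIDTABLE_MAX)
		return;

	loc = find_uid(uid);
	if (loc >= 0) {
		uid_set[loc].expires = expires;
		return;
	}

	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	uid_used++;

	for (i = 1; i < uid_used; i++) {	/* insertion sort keeps the table ordered by uid */
		tmp = uid_set[i];
		for (j = i; j > 0 && uid_set[j - 1].uid > tmp.uid; j--)
			uid_set[j] = uid_set[j - 1];
		uid_set[j] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(33, 60);
	insert_uid(1000, 120);	/* refreshes the existing entry's expiry */
	printf("entries=%u idx(33)=%d idx(1000)=%d\n", uid_used, find_uid(33), find_uid(1000));
	return 0;
}
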
49335 diff -urNp linux-2.6.32.41/grsecurity/gracl_shm.c linux-2.6.32.41/grsecurity/gracl_shm.c
49336 --- linux-2.6.32.41/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49337 +++ linux-2.6.32.41/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49338 @@ -0,0 +1,40 @@
49339 +#include <linux/kernel.h>
49340 +#include <linux/mm.h>
49341 +#include <linux/sched.h>
49342 +#include <linux/file.h>
49343 +#include <linux/ipc.h>
49344 +#include <linux/gracl.h>
49345 +#include <linux/grsecurity.h>
49346 +#include <linux/grinternal.h>
49347 +
49348 +int
49349 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49350 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49351 +{
49352 + struct task_struct *task;
49353 +
49354 + if (!gr_acl_is_enabled())
49355 + return 1;
49356 +
49357 + rcu_read_lock();
49358 + read_lock(&tasklist_lock);
49359 +
49360 + task = find_task_by_vpid(shm_cprid);
49361 +
49362 + if (unlikely(!task))
49363 + task = find_task_by_vpid(shm_lapid);
49364 +
49365 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49366 + (task->pid == shm_lapid)) &&
49367 + (task->acl->mode & GR_PROTSHM) &&
49368 + (task->acl != current->acl))) {
49369 + read_unlock(&tasklist_lock);
49370 + rcu_read_unlock();
49371 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49372 + return 0;
49373 + }
49374 + read_unlock(&tasklist_lock);
49375 + rcu_read_unlock();
49376 +
49377 + return 1;
49378 +}
49379 diff -urNp linux-2.6.32.41/grsecurity/grsec_chdir.c linux-2.6.32.41/grsecurity/grsec_chdir.c
49380 --- linux-2.6.32.41/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49381 +++ linux-2.6.32.41/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49382 @@ -0,0 +1,19 @@
49383 +#include <linux/kernel.h>
49384 +#include <linux/sched.h>
49385 +#include <linux/fs.h>
49386 +#include <linux/file.h>
49387 +#include <linux/grsecurity.h>
49388 +#include <linux/grinternal.h>
49389 +
49390 +void
49391 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49392 +{
49393 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49394 + if ((grsec_enable_chdir && grsec_enable_group &&
49395 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49396 + !grsec_enable_group)) {
49397 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49398 + }
49399 +#endif
49400 + return;
49401 +}
49402 diff -urNp linux-2.6.32.41/grsecurity/grsec_chroot.c linux-2.6.32.41/grsecurity/grsec_chroot.c
49403 --- linux-2.6.32.41/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49404 +++ linux-2.6.32.41/grsecurity/grsec_chroot.c 2011-04-17 15:56:46.000000000 -0400
49405 @@ -0,0 +1,395 @@
49406 +#include <linux/kernel.h>
49407 +#include <linux/module.h>
49408 +#include <linux/sched.h>
49409 +#include <linux/file.h>
49410 +#include <linux/fs.h>
49411 +#include <linux/mount.h>
49412 +#include <linux/types.h>
49413 +#include <linux/pid_namespace.h>
49414 +#include <linux/grsecurity.h>
49415 +#include <linux/grinternal.h>
49416 +
49417 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49418 +{
49419 +#ifdef CONFIG_GRKERNSEC
49420 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49421 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49422 + task->gr_is_chrooted = 1;
49423 + else
49424 + task->gr_is_chrooted = 0;
49425 +
49426 + task->gr_chroot_dentry = path->dentry;
49427 +#endif
49428 + return;
49429 +}
49430 +
49431 +void gr_clear_chroot_entries(struct task_struct *task)
49432 +{
49433 +#ifdef CONFIG_GRKERNSEC
49434 + task->gr_is_chrooted = 0;
49435 + task->gr_chroot_dentry = NULL;
49436 +#endif
49437 + return;
49438 +}
49439 +
49440 +int
49441 +gr_handle_chroot_unix(const pid_t pid)
49442 +{
49443 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49444 + struct pid *spid = NULL;
49445 +
49446 + if (unlikely(!grsec_enable_chroot_unix))
49447 + return 1;
49448 +
49449 + if (likely(!proc_is_chrooted(current)))
49450 + return 1;
49451 +
49452 + rcu_read_lock();
49453 + read_lock(&tasklist_lock);
49454 +
49455 + spid = find_vpid(pid);
49456 + if (spid) {
49457 + struct task_struct *p;
49458 + p = pid_task(spid, PIDTYPE_PID);
49459 + if (unlikely(p && !have_same_root(current, p))) {
49460 + read_unlock(&tasklist_lock);
49461 + rcu_read_unlock();
49462 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49463 + return 0;
49464 + }
49465 + }
49466 + read_unlock(&tasklist_lock);
49467 + rcu_read_unlock();
49468 +#endif
49469 + return 1;
49470 +}
49471 +
49472 +int
49473 +gr_handle_chroot_nice(void)
49474 +{
49475 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49476 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49477 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49478 + return -EPERM;
49479 + }
49480 +#endif
49481 + return 0;
49482 +}
49483 +
49484 +int
49485 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49486 +{
49487 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49488 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49489 + && proc_is_chrooted(current)) {
49490 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49491 + return -EACCES;
49492 + }
49493 +#endif
49494 + return 0;
49495 +}
49496 +
49497 +int
49498 +gr_handle_chroot_rawio(const struct inode *inode)
49499 +{
49500 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49501 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49502 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49503 + return 1;
49504 +#endif
49505 + return 0;
49506 +}
49507 +
49508 +int
49509 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49510 +{
49511 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49512 + struct task_struct *p;
49513 + int ret = 0;
49514 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49515 + return ret;
49516 +
49517 + read_lock(&tasklist_lock);
49518 + do_each_pid_task(pid, type, p) {
49519 + if (!have_same_root(current, p)) {
49520 + ret = 1;
49521 + goto out;
49522 + }
49523 + } while_each_pid_task(pid, type, p);
49524 +out:
49525 + read_unlock(&tasklist_lock);
49526 + return ret;
49527 +#endif
49528 + return 0;
49529 +}
49530 +
49531 +int
49532 +gr_pid_is_chrooted(struct task_struct *p)
49533 +{
49534 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49535 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49536 + return 0;
49537 +
49538 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49539 + !have_same_root(current, p)) {
49540 + return 1;
49541 + }
49542 +#endif
49543 + return 0;
49544 +}
49545 +
49546 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49547 +
49548 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49549 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49550 +{
49551 + struct dentry *dentry = (struct dentry *)u_dentry;
49552 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49553 + struct dentry *realroot;
49554 + struct vfsmount *realrootmnt;
49555 + struct dentry *currentroot;
49556 + struct vfsmount *currentmnt;
49557 + struct task_struct *reaper = &init_task;
49558 + int ret = 1;
49559 +
49560 + read_lock(&reaper->fs->lock);
49561 + realrootmnt = mntget(reaper->fs->root.mnt);
49562 + realroot = dget(reaper->fs->root.dentry);
49563 + read_unlock(&reaper->fs->lock);
49564 +
49565 + read_lock(&current->fs->lock);
49566 + currentmnt = mntget(current->fs->root.mnt);
49567 + currentroot = dget(current->fs->root.dentry);
49568 + read_unlock(&current->fs->lock);
49569 +
49570 + spin_lock(&dcache_lock);
49571 + for (;;) {
49572 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49573 + || (dentry == currentroot && mnt == currentmnt)))
49574 + break;
49575 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49576 + if (mnt->mnt_parent == mnt)
49577 + break;
49578 + dentry = mnt->mnt_mountpoint;
49579 + mnt = mnt->mnt_parent;
49580 + continue;
49581 + }
49582 + dentry = dentry->d_parent;
49583 + }
49584 + spin_unlock(&dcache_lock);
49585 +
49586 + dput(currentroot);
49587 + mntput(currentmnt);
49588 +
49589 + /* access is outside of chroot */
49590 + if (dentry == realroot && mnt == realrootmnt)
49591 + ret = 0;
49592 +
49593 + dput(realroot);
49594 + mntput(realrootmnt);
49595 + return ret;
49596 +}
49597 +#endif
49598 +
49599 +int
49600 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49601 +{
49602 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49603 + if (!grsec_enable_chroot_fchdir)
49604 + return 1;
49605 +
49606 + if (!proc_is_chrooted(current))
49607 + return 1;
49608 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49609 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49610 + return 0;
49611 + }
49612 +#endif
49613 + return 1;
49614 +}
49615 +
49616 +int
49617 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49618 + const time_t shm_createtime)
49619 +{
49620 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49621 + struct pid *pid = NULL;
49622 + time_t starttime;
49623 +
49624 + if (unlikely(!grsec_enable_chroot_shmat))
49625 + return 1;
49626 +
49627 + if (likely(!proc_is_chrooted(current)))
49628 + return 1;
49629 +
49630 + rcu_read_lock();
49631 + read_lock(&tasklist_lock);
49632 +
49633 + pid = find_vpid(shm_cprid);
49634 + if (pid) {
49635 + struct task_struct *p;
49636 + p = pid_task(pid, PIDTYPE_PID);
49637 + if (p == NULL)
49638 + goto unlock;
49639 + starttime = p->start_time.tv_sec;
49640 + if (unlikely(!have_same_root(current, p) &&
49641 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49642 + read_unlock(&tasklist_lock);
49643 + rcu_read_unlock();
49644 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49645 + return 0;
49646 + }
49647 + } else {
49648 + pid = find_vpid(shm_lapid);
49649 + if (pid) {
49650 + struct task_struct *p;
49651 + p = pid_task(pid, PIDTYPE_PID);
49652 + if (p == NULL)
49653 + goto unlock;
49654 + if (unlikely(!have_same_root(current, p))) {
49655 + read_unlock(&tasklist_lock);
49656 + rcu_read_unlock();
49657 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49658 + return 0;
49659 + }
49660 + }
49661 + }
49662 +
49663 +unlock:
49664 + read_unlock(&tasklist_lock);
49665 + rcu_read_unlock();
49666 +#endif
49667 + return 1;
49668 +}
49669 +
49670 +void
49671 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49672 +{
49673 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49674 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49675 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49676 +#endif
49677 + return;
49678 +}
49679 +
49680 +int
49681 +gr_handle_chroot_mknod(const struct dentry *dentry,
49682 + const struct vfsmount *mnt, const int mode)
49683 +{
49684 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49685 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49686 + proc_is_chrooted(current)) {
49687 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49688 + return -EPERM;
49689 + }
49690 +#endif
49691 + return 0;
49692 +}
49693 +
49694 +int
49695 +gr_handle_chroot_mount(const struct dentry *dentry,
49696 + const struct vfsmount *mnt, const char *dev_name)
49697 +{
49698 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49699 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49700 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
49701 + return -EPERM;
49702 + }
49703 +#endif
49704 + return 0;
49705 +}
49706 +
49707 +int
49708 +gr_handle_chroot_pivot(void)
49709 +{
49710 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49711 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49712 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49713 + return -EPERM;
49714 + }
49715 +#endif
49716 + return 0;
49717 +}
49718 +
49719 +int
49720 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49721 +{
49722 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49723 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49724 + !gr_is_outside_chroot(dentry, mnt)) {
49725 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49726 + return -EPERM;
49727 + }
49728 +#endif
49729 + return 0;
49730 +}
49731 +
49732 +int
49733 +gr_handle_chroot_caps(struct path *path)
49734 +{
49735 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49736 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49737 + (init_task.fs->root.dentry != path->dentry) &&
49738 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49739 +
49740 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49741 + const struct cred *old = current_cred();
49742 + struct cred *new = prepare_creds();
49743 + if (new == NULL)
49744 + return 1;
49745 +
49746 + new->cap_permitted = cap_drop(old->cap_permitted,
49747 + chroot_caps);
49748 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49749 + chroot_caps);
49750 + new->cap_effective = cap_drop(old->cap_effective,
49751 + chroot_caps);
49752 +
49753 + commit_creds(new);
49754 +
49755 + return 0;
49756 + }
49757 +#endif
49758 + return 0;
49759 +}
49760 +
49761 +int
49762 +gr_handle_chroot_sysctl(const int op)
49763 +{
49764 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49765 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49766 + && (op & MAY_WRITE))
49767 + return -EACCES;
49768 +#endif
49769 + return 0;
49770 +}
49771 +
49772 +void
49773 +gr_handle_chroot_chdir(struct path *path)
49774 +{
49775 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49776 + if (grsec_enable_chroot_chdir)
49777 + set_fs_pwd(current->fs, path);
49778 +#endif
49779 + return;
49780 +}
49781 +
49782 +int
49783 +gr_handle_chroot_chmod(const struct dentry *dentry,
49784 + const struct vfsmount *mnt, const int mode)
49785 +{
49786 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49787 + /* allow chmod +s on directories, but not on files */
49788 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49789 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49790 + proc_is_chrooted(current)) {
49791 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49792 + return -EPERM;
49793 + }
49794 +#endif
49795 + return 0;
49796 +}
49797 +
49798 +#ifdef CONFIG_SECURITY
49799 +EXPORT_SYMBOL(gr_handle_chroot_caps);
49800 +#endif
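
gr_handle_chroot_chmod() above refuses chmod requests that would produce a setid file inside a chroot; setgid only counts when group-execute is also requested, since setgid without g+x denotes mandatory locking rather than a setgid executable. A standalone sketch of that bit test (not part of the patch; sample modes are arbitrary):

#include <stdio.h>
#include <sys/stat.h>

/* setuid always counts; setgid only together with group-exec */
static int is_setid_request(int mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", is_setid_request(04755));	/* 1: u+s */
	printf("%d\n", is_setid_request(02755));	/* 1: g+s with g+x */
	printf("%d\n", is_setid_request(02644));	/* 0: g+s without g+x (mandatory locking) */
	printf("%d\n", is_setid_request(0644));		/* 0: plain mode */
	return 0;
}
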
49801 diff -urNp linux-2.6.32.41/grsecurity/grsec_disabled.c linux-2.6.32.41/grsecurity/grsec_disabled.c
49802 --- linux-2.6.32.41/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49803 +++ linux-2.6.32.41/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49804 @@ -0,0 +1,447 @@
49805 +#include <linux/kernel.h>
49806 +#include <linux/module.h>
49807 +#include <linux/sched.h>
49808 +#include <linux/file.h>
49809 +#include <linux/fs.h>
49810 +#include <linux/kdev_t.h>
49811 +#include <linux/net.h>
49812 +#include <linux/in.h>
49813 +#include <linux/ip.h>
49814 +#include <linux/skbuff.h>
49815 +#include <linux/sysctl.h>
49816 +
49817 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49818 +void
49819 +pax_set_initial_flags(struct linux_binprm *bprm)
49820 +{
49821 + return;
49822 +}
49823 +#endif
49824 +
49825 +#ifdef CONFIG_SYSCTL
49826 +__u32
49827 +gr_handle_sysctl(const struct ctl_table * table, const int op)
49828 +{
49829 + return 0;
49830 +}
49831 +#endif
49832 +
49833 +#ifdef CONFIG_TASKSTATS
49834 +int gr_is_taskstats_denied(int pid)
49835 +{
49836 + return 0;
49837 +}
49838 +#endif
49839 +
49840 +int
49841 +gr_acl_is_enabled(void)
49842 +{
49843 + return 0;
49844 +}
49845 +
49846 +int
49847 +gr_handle_rawio(const struct inode *inode)
49848 +{
49849 + return 0;
49850 +}
49851 +
49852 +void
49853 +gr_acl_handle_psacct(struct task_struct *task, const long code)
49854 +{
49855 + return;
49856 +}
49857 +
49858 +int
49859 +gr_handle_ptrace(struct task_struct *task, const long request)
49860 +{
49861 + return 0;
49862 +}
49863 +
49864 +int
49865 +gr_handle_proc_ptrace(struct task_struct *task)
49866 +{
49867 + return 0;
49868 +}
49869 +
49870 +void
49871 +gr_learn_resource(const struct task_struct *task,
49872 + const int res, const unsigned long wanted, const int gt)
49873 +{
49874 + return;
49875 +}
49876 +
49877 +int
49878 +gr_set_acls(const int type)
49879 +{
49880 + return 0;
49881 +}
49882 +
49883 +int
49884 +gr_check_hidden_task(const struct task_struct *tsk)
49885 +{
49886 + return 0;
49887 +}
49888 +
49889 +int
49890 +gr_check_protected_task(const struct task_struct *task)
49891 +{
49892 + return 0;
49893 +}
49894 +
49895 +int
49896 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49897 +{
49898 + return 0;
49899 +}
49900 +
49901 +void
49902 +gr_copy_label(struct task_struct *tsk)
49903 +{
49904 + return;
49905 +}
49906 +
49907 +void
49908 +gr_set_pax_flags(struct task_struct *task)
49909 +{
49910 + return;
49911 +}
49912 +
49913 +int
49914 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49915 + const int unsafe_share)
49916 +{
49917 + return 0;
49918 +}
49919 +
49920 +void
49921 +gr_handle_delete(const ino_t ino, const dev_t dev)
49922 +{
49923 + return;
49924 +}
49925 +
49926 +void
49927 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49928 +{
49929 + return;
49930 +}
49931 +
49932 +void
49933 +gr_handle_crash(struct task_struct *task, const int sig)
49934 +{
49935 + return;
49936 +}
49937 +
49938 +int
49939 +gr_check_crash_exec(const struct file *filp)
49940 +{
49941 + return 0;
49942 +}
49943 +
49944 +int
49945 +gr_check_crash_uid(const uid_t uid)
49946 +{
49947 + return 0;
49948 +}
49949 +
49950 +void
49951 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49952 + struct dentry *old_dentry,
49953 + struct dentry *new_dentry,
49954 + struct vfsmount *mnt, const __u8 replace)
49955 +{
49956 + return;
49957 +}
49958 +
49959 +int
49960 +gr_search_socket(const int family, const int type, const int protocol)
49961 +{
49962 + return 1;
49963 +}
49964 +
49965 +int
49966 +gr_search_connectbind(const int mode, const struct socket *sock,
49967 + const struct sockaddr_in *addr)
49968 +{
49969 + return 0;
49970 +}
49971 +
49972 +int
49973 +gr_is_capable(const int cap)
49974 +{
49975 + return 1;
49976 +}
49977 +
49978 +int
49979 +gr_is_capable_nolog(const int cap)
49980 +{
49981 + return 1;
49982 +}
49983 +
49984 +void
49985 +gr_handle_alertkill(struct task_struct *task)
49986 +{
49987 + return;
49988 +}
49989 +
49990 +__u32
49991 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
49992 +{
49993 + return 1;
49994 +}
49995 +
49996 +__u32
49997 +gr_acl_handle_hidden_file(const struct dentry * dentry,
49998 + const struct vfsmount * mnt)
49999 +{
50000 + return 1;
50001 +}
50002 +
50003 +__u32
50004 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50005 + const int fmode)
50006 +{
50007 + return 1;
50008 +}
50009 +
50010 +__u32
50011 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50012 +{
50013 + return 1;
50014 +}
50015 +
50016 +__u32
50017 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50018 +{
50019 + return 1;
50020 +}
50021 +
50022 +int
50023 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50024 + unsigned int *vm_flags)
50025 +{
50026 + return 1;
50027 +}
50028 +
50029 +__u32
50030 +gr_acl_handle_truncate(const struct dentry * dentry,
50031 + const struct vfsmount * mnt)
50032 +{
50033 + return 1;
50034 +}
50035 +
50036 +__u32
50037 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50038 +{
50039 + return 1;
50040 +}
50041 +
50042 +__u32
50043 +gr_acl_handle_access(const struct dentry * dentry,
50044 + const struct vfsmount * mnt, const int fmode)
50045 +{
50046 + return 1;
50047 +}
50048 +
50049 +__u32
50050 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50051 + mode_t mode)
50052 +{
50053 + return 1;
50054 +}
50055 +
50056 +__u32
50057 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50058 + mode_t mode)
50059 +{
50060 + return 1;
50061 +}
50062 +
50063 +__u32
50064 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50065 +{
50066 + return 1;
50067 +}
50068 +
50069 +__u32
50070 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50071 +{
50072 + return 1;
50073 +}
50074 +
50075 +void
50076 +grsecurity_init(void)
50077 +{
50078 + return;
50079 +}
50080 +
50081 +__u32
50082 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50083 + const struct dentry * parent_dentry,
50084 + const struct vfsmount * parent_mnt,
50085 + const int mode)
50086 +{
50087 + return 1;
50088 +}
50089 +
50090 +__u32
50091 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50092 + const struct dentry * parent_dentry,
50093 + const struct vfsmount * parent_mnt)
50094 +{
50095 + return 1;
50096 +}
50097 +
50098 +__u32
50099 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50100 + const struct dentry * parent_dentry,
50101 + const struct vfsmount * parent_mnt, const char *from)
50102 +{
50103 + return 1;
50104 +}
50105 +
50106 +__u32
50107 +gr_acl_handle_link(const struct dentry * new_dentry,
50108 + const struct dentry * parent_dentry,
50109 + const struct vfsmount * parent_mnt,
50110 + const struct dentry * old_dentry,
50111 + const struct vfsmount * old_mnt, const char *to)
50112 +{
50113 + return 1;
50114 +}
50115 +
50116 +int
50117 +gr_acl_handle_rename(const struct dentry *new_dentry,
50118 + const struct dentry *parent_dentry,
50119 + const struct vfsmount *parent_mnt,
50120 + const struct dentry *old_dentry,
50121 + const struct inode *old_parent_inode,
50122 + const struct vfsmount *old_mnt, const char *newname)
50123 +{
50124 + return 0;
50125 +}
50126 +
50127 +int
50128 +gr_acl_handle_filldir(const struct file *file, const char *name,
50129 + const int namelen, const ino_t ino)
50130 +{
50131 + return 1;
50132 +}
50133 +
50134 +int
50135 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50136 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50137 +{
50138 + return 1;
50139 +}
50140 +
50141 +int
50142 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50143 +{
50144 + return 0;
50145 +}
50146 +
50147 +int
50148 +gr_search_accept(const struct socket *sock)
50149 +{
50150 + return 0;
50151 +}
50152 +
50153 +int
50154 +gr_search_listen(const struct socket *sock)
50155 +{
50156 + return 0;
50157 +}
50158 +
50159 +int
50160 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50161 +{
50162 + return 0;
50163 +}
50164 +
50165 +__u32
50166 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50167 +{
50168 + return 1;
50169 +}
50170 +
50171 +__u32
50172 +gr_acl_handle_creat(const struct dentry * dentry,
50173 + const struct dentry * p_dentry,
50174 + const struct vfsmount * p_mnt, const int fmode,
50175 + const int imode)
50176 +{
50177 + return 1;
50178 +}
50179 +
50180 +void
50181 +gr_acl_handle_exit(void)
50182 +{
50183 + return;
50184 +}
50185 +
50186 +int
50187 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50188 +{
50189 + return 1;
50190 +}
50191 +
50192 +void
50193 +gr_set_role_label(const uid_t uid, const gid_t gid)
50194 +{
50195 + return;
50196 +}
50197 +
50198 +int
50199 +gr_acl_handle_procpidmem(const struct task_struct *task)
50200 +{
50201 + return 0;
50202 +}
50203 +
50204 +int
50205 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50206 +{
50207 + return 0;
50208 +}
50209 +
50210 +int
50211 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50212 +{
50213 + return 0;
50214 +}
50215 +
50216 +void
50217 +gr_set_kernel_label(struct task_struct *task)
50218 +{
50219 + return;
50220 +}
50221 +
50222 +int
50223 +gr_check_user_change(int real, int effective, int fs)
50224 +{
50225 + return 0;
50226 +}
50227 +
50228 +int
50229 +gr_check_group_change(int real, int effective, int fs)
50230 +{
50231 + return 0;
50232 +}
50233 +
50234 +int gr_acl_enable_at_secure(void)
50235 +{
50236 + return 0;
50237 +}
50238 +
50239 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50240 +{
50241 + return dentry->d_inode->i_sb->s_dev;
50242 +}
50243 +
50244 +EXPORT_SYMBOL(gr_is_capable);
50245 +EXPORT_SYMBOL(gr_is_capable_nolog);
50246 +EXPORT_SYMBOL(gr_learn_resource);
50247 +EXPORT_SYMBOL(gr_set_kernel_label);
50248 +#ifdef CONFIG_SECURITY
50249 +EXPORT_SYMBOL(gr_check_user_change);
50250 +EXPORT_SYMBOL(gr_check_group_change);
50251 +#endif
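
[Editor's sketch] grsec_disabled.c above supplies the no-op fallbacks compiled when grsecurity's RBAC system is switched off: allow-style hooks return 1, denial-style checks return 0, so call sites never need their own #ifdefs. A minimal userspace sketch of that compile-time stub pattern follows; the names (CONFIG_FEATURE, feature_check_allowed) are purely illustrative, not part of the patch.

#include <stdio.h>

#ifdef CONFIG_FEATURE
static int feature_check_allowed(int resource)
{
        return resource != 42;          /* a real policy would live here */
}
#else
static int feature_check_allowed(int resource)
{
        (void)resource;
        return 1;                       /* feature compiled out: always allow */
}
#endif

int main(void)
{
        printf("allowed: %d\n", feature_check_allowed(7));
        return 0;
}
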
50252 diff -urNp linux-2.6.32.41/grsecurity/grsec_exec.c linux-2.6.32.41/grsecurity/grsec_exec.c
50253 --- linux-2.6.32.41/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50254 +++ linux-2.6.32.41/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50255 @@ -0,0 +1,148 @@
50256 +#include <linux/kernel.h>
50257 +#include <linux/sched.h>
50258 +#include <linux/file.h>
50259 +#include <linux/binfmts.h>
50260 +#include <linux/smp_lock.h>
50261 +#include <linux/fs.h>
50262 +#include <linux/types.h>
50263 +#include <linux/grdefs.h>
50264 +#include <linux/grinternal.h>
50265 +#include <linux/capability.h>
50266 +#include <linux/compat.h>
50267 +
50268 +#include <asm/uaccess.h>
50269 +
50270 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50271 +static char gr_exec_arg_buf[132];
50272 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50273 +#endif
50274 +
50275 +int
50276 +gr_handle_nproc(void)
50277 +{
50278 +#ifdef CONFIG_GRKERNSEC_EXECVE
50279 + const struct cred *cred = current_cred();
50280 + if (grsec_enable_execve && cred->user &&
50281 + (atomic_read(&cred->user->processes) >
50282 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50283 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50284 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50285 + return -EAGAIN;
50286 + }
50287 +#endif
50288 + return 0;
50289 +}
50290 +
50291 +void
50292 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50293 +{
50294 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50295 + char *grarg = gr_exec_arg_buf;
50296 + unsigned int i, x, execlen = 0;
50297 + char c;
50298 +
50299 + if (!((grsec_enable_execlog && grsec_enable_group &&
50300 + in_group_p(grsec_audit_gid))
50301 + || (grsec_enable_execlog && !grsec_enable_group)))
50302 + return;
50303 +
50304 + mutex_lock(&gr_exec_arg_mutex);
50305 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50306 +
50307 + if (unlikely(argv == NULL))
50308 + goto log;
50309 +
50310 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50311 + const char __user *p;
50312 + unsigned int len;
50313 +
50314 + if (copy_from_user(&p, argv + i, sizeof(p)))
50315 + goto log;
50316 + if (!p)
50317 + goto log;
50318 + len = strnlen_user(p, 128 - execlen);
50319 + if (len > 128 - execlen)
50320 + len = 128 - execlen;
50321 + else if (len > 0)
50322 + len--;
50323 + if (copy_from_user(grarg + execlen, p, len))
50324 + goto log;
50325 +
50326 + /* rewrite unprintable characters */
50327 + for (x = 0; x < len; x++) {
50328 + c = *(grarg + execlen + x);
50329 + if (c < 32 || c > 126)
50330 + *(grarg + execlen + x) = ' ';
50331 + }
50332 +
50333 + execlen += len;
50334 + *(grarg + execlen) = ' ';
50335 + *(grarg + execlen + 1) = '\0';
50336 + execlen++;
50337 + }
50338 +
50339 + log:
50340 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50341 + bprm->file->f_path.mnt, grarg);
50342 + mutex_unlock(&gr_exec_arg_mutex);
50343 +#endif
50344 + return;
50345 +}
50346 +
50347 +#ifdef CONFIG_COMPAT
50348 +void
50349 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50350 +{
50351 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50352 + char *grarg = gr_exec_arg_buf;
50353 + unsigned int i, x, execlen = 0;
50354 + char c;
50355 +
50356 + if (!((grsec_enable_execlog && grsec_enable_group &&
50357 + in_group_p(grsec_audit_gid))
50358 + || (grsec_enable_execlog && !grsec_enable_group)))
50359 + return;
50360 +
50361 + mutex_lock(&gr_exec_arg_mutex);
50362 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50363 +
50364 + if (unlikely(argv == NULL))
50365 + goto log;
50366 +
50367 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50368 + compat_uptr_t p;
50369 + unsigned int len;
50370 +
50371 + if (get_user(p, argv + i))
50372 + goto log;
50373 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50374 + if (len > 128 - execlen)
50375 + len = 128 - execlen;
50376 + else if (len > 0)
50377 + len--;
50378 + else
50379 + goto log;
50380 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50381 + goto log;
50382 +
50383 + /* rewrite unprintable characters */
50384 + for (x = 0; x < len; x++) {
50385 + c = *(grarg + execlen + x);
50386 + if (c < 32 || c > 126)
50387 + *(grarg + execlen + x) = ' ';
50388 + }
50389 +
50390 + execlen += len;
50391 + *(grarg + execlen) = ' ';
50392 + *(grarg + execlen + 1) = '\0';
50393 + execlen++;
50394 + }
50395 +
50396 + log:
50397 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50398 + bprm->file->f_path.mnt, grarg);
50399 + mutex_unlock(&gr_exec_arg_mutex);
50400 +#endif
50401 + return;
50402 +}
50403 +#endif
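
[Editor's sketch] The exec-logging path above copies up to 128 bytes of argv into the static 132-byte gr_exec_arg_buf under a mutex and rewrites every byte outside printable ASCII to a space before logging. Below is a standalone userspace sketch of just the sanitization step, assuming an ordinary NUL-terminated buffer rather than the kernel copy_from_user flow.

#include <stdio.h>
#include <string.h>

static void sanitize_args(char *buf, size_t len)
{
        /* anything outside the printable ASCII range becomes a space */
        for (size_t i = 0; i < len; i++)
                if (buf[i] < 32 || buf[i] > 126)
                        buf[i] = ' ';
}

int main(void)
{
        char line[132] = "cmd\targ1\narg2";   /* 132 mirrors the static buffer */

        sanitize_args(line, strlen(line));
        printf("exec log: %s\n", line);
        return 0;
}
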
50404 diff -urNp linux-2.6.32.41/grsecurity/grsec_fifo.c linux-2.6.32.41/grsecurity/grsec_fifo.c
50405 --- linux-2.6.32.41/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50406 +++ linux-2.6.32.41/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50407 @@ -0,0 +1,24 @@
50408 +#include <linux/kernel.h>
50409 +#include <linux/sched.h>
50410 +#include <linux/fs.h>
50411 +#include <linux/file.h>
50412 +#include <linux/grinternal.h>
50413 +
50414 +int
50415 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50416 + const struct dentry *dir, const int flag, const int acc_mode)
50417 +{
50418 +#ifdef CONFIG_GRKERNSEC_FIFO
50419 + const struct cred *cred = current_cred();
50420 +
50421 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50422 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50423 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50424 + (cred->fsuid != dentry->d_inode->i_uid)) {
50425 + if (!inode_permission(dentry->d_inode, acc_mode))
50426 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50427 + return -EACCES;
50428 + }
50429 +#endif
50430 + return 0;
50431 +}
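
[Editor's sketch] gr_handle_fifo above denies opening a FIFO that sits in a sticky directory when the FIFO belongs to neither the directory owner nor the opening process and O_EXCL was not used. The same predicate, restated as a self-contained userspace function with illustrative argument names:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool fifo_open_denied(mode_t dir_mode, uid_t dir_uid,
                             uid_t fifo_uid, uid_t fsuid, int flags)
{
        return (dir_mode & S_ISVTX) && !(flags & O_EXCL) &&
               fifo_uid != dir_uid && fifo_uid != fsuid;
}

int main(void)
{
        /* FIFO owned by uid 1001 inside a sticky dir owned by root,
         * opened by uid 1000 without O_EXCL: denied */
        printf("denied: %d\n",
               fifo_open_denied(S_ISVTX | 0777, 0, 1001, 1000, 0));
        return 0;
}
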
50432 diff -urNp linux-2.6.32.41/grsecurity/grsec_fork.c linux-2.6.32.41/grsecurity/grsec_fork.c
50433 --- linux-2.6.32.41/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50434 +++ linux-2.6.32.41/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50435 @@ -0,0 +1,23 @@
50436 +#include <linux/kernel.h>
50437 +#include <linux/sched.h>
50438 +#include <linux/grsecurity.h>
50439 +#include <linux/grinternal.h>
50440 +#include <linux/errno.h>
50441 +
50442 +void
50443 +gr_log_forkfail(const int retval)
50444 +{
50445 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50446 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50447 + switch (retval) {
50448 + case -EAGAIN:
50449 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50450 + break;
50451 + case -ENOMEM:
50452 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50453 + break;
50454 + }
50455 + }
50456 +#endif
50457 + return;
50458 +}
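
[Editor's sketch] Fork-failure logging above only reports the two resource-exhaustion cases. A trivial userspace analog of the errno-to-label mapping, for illustration only:

#include <errno.h>
#include <stdio.h>

static const char *forkfail_reason(int retval)
{
        switch (retval) {
        case -EAGAIN: return "EAGAIN";
        case -ENOMEM: return "ENOMEM";
        default:      return NULL;      /* other failures are not logged */
        }
}

int main(void)
{
        printf("%s\n", forkfail_reason(-EAGAIN));
        return 0;
}
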
50459 diff -urNp linux-2.6.32.41/grsecurity/grsec_init.c linux-2.6.32.41/grsecurity/grsec_init.c
50460 --- linux-2.6.32.41/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50461 +++ linux-2.6.32.41/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50462 @@ -0,0 +1,270 @@
50463 +#include <linux/kernel.h>
50464 +#include <linux/sched.h>
50465 +#include <linux/mm.h>
50466 +#include <linux/smp_lock.h>
50467 +#include <linux/gracl.h>
50468 +#include <linux/slab.h>
50469 +#include <linux/vmalloc.h>
50470 +#include <linux/percpu.h>
50471 +#include <linux/module.h>
50472 +
50473 +int grsec_enable_link;
50474 +int grsec_enable_dmesg;
50475 +int grsec_enable_harden_ptrace;
50476 +int grsec_enable_fifo;
50477 +int grsec_enable_execve;
50478 +int grsec_enable_execlog;
50479 +int grsec_enable_signal;
50480 +int grsec_enable_forkfail;
50481 +int grsec_enable_audit_ptrace;
50482 +int grsec_enable_time;
50483 +int grsec_enable_audit_textrel;
50484 +int grsec_enable_group;
50485 +int grsec_audit_gid;
50486 +int grsec_enable_chdir;
50487 +int grsec_enable_mount;
50488 +int grsec_enable_rofs;
50489 +int grsec_enable_chroot_findtask;
50490 +int grsec_enable_chroot_mount;
50491 +int grsec_enable_chroot_shmat;
50492 +int grsec_enable_chroot_fchdir;
50493 +int grsec_enable_chroot_double;
50494 +int grsec_enable_chroot_pivot;
50495 +int grsec_enable_chroot_chdir;
50496 +int grsec_enable_chroot_chmod;
50497 +int grsec_enable_chroot_mknod;
50498 +int grsec_enable_chroot_nice;
50499 +int grsec_enable_chroot_execlog;
50500 +int grsec_enable_chroot_caps;
50501 +int grsec_enable_chroot_sysctl;
50502 +int grsec_enable_chroot_unix;
50503 +int grsec_enable_tpe;
50504 +int grsec_tpe_gid;
50505 +int grsec_enable_blackhole;
50506 +#ifdef CONFIG_IPV6_MODULE
50507 +EXPORT_SYMBOL(grsec_enable_blackhole);
50508 +#endif
50509 +int grsec_lastack_retries;
50510 +int grsec_enable_tpe_all;
50511 +int grsec_enable_tpe_invert;
50512 +int grsec_enable_socket_all;
50513 +int grsec_socket_all_gid;
50514 +int grsec_enable_socket_client;
50515 +int grsec_socket_client_gid;
50516 +int grsec_enable_socket_server;
50517 +int grsec_socket_server_gid;
50518 +int grsec_resource_logging;
50519 +int grsec_disable_privio;
50520 +int grsec_enable_log_rwxmaps;
50521 +int grsec_lock;
50522 +
50523 +DEFINE_SPINLOCK(grsec_alert_lock);
50524 +unsigned long grsec_alert_wtime = 0;
50525 +unsigned long grsec_alert_fyet = 0;
50526 +
50527 +DEFINE_SPINLOCK(grsec_audit_lock);
50528 +
50529 +DEFINE_RWLOCK(grsec_exec_file_lock);
50530 +
50531 +char *gr_shared_page[4];
50532 +
50533 +char *gr_alert_log_fmt;
50534 +char *gr_audit_log_fmt;
50535 +char *gr_alert_log_buf;
50536 +char *gr_audit_log_buf;
50537 +
50538 +extern struct gr_arg *gr_usermode;
50539 +extern unsigned char *gr_system_salt;
50540 +extern unsigned char *gr_system_sum;
50541 +
50542 +void __init
50543 +grsecurity_init(void)
50544 +{
50545 + int j;
50546 + /* create the per-cpu shared pages */
50547 +
50548 +#ifdef CONFIG_X86
50549 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50550 +#endif
50551 +
50552 + for (j = 0; j < 4; j++) {
50553 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50554 + if (gr_shared_page[j] == NULL) {
50555 + panic("Unable to allocate grsecurity shared page");
50556 + return;
50557 + }
50558 + }
50559 +
50560 + /* allocate log buffers */
50561 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50562 + if (!gr_alert_log_fmt) {
50563 + panic("Unable to allocate grsecurity alert log format buffer");
50564 + return;
50565 + }
50566 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50567 + if (!gr_audit_log_fmt) {
50568 + panic("Unable to allocate grsecurity audit log format buffer");
50569 + return;
50570 + }
50571 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50572 + if (!gr_alert_log_buf) {
50573 + panic("Unable to allocate grsecurity alert log buffer");
50574 + return;
50575 + }
50576 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50577 + if (!gr_audit_log_buf) {
50578 + panic("Unable to allocate grsecurity audit log buffer");
50579 + return;
50580 + }
50581 +
50582 + /* allocate memory for authentication structure */
50583 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50584 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50585 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50586 +
50587 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50588 + panic("Unable to allocate grsecurity authentication structure");
50589 + return;
50590 + }
50591 +
50592 +
50593 +#ifdef CONFIG_GRKERNSEC_IO
50594 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50595 + grsec_disable_privio = 1;
50596 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50597 + grsec_disable_privio = 1;
50598 +#else
50599 + grsec_disable_privio = 0;
50600 +#endif
50601 +#endif
50602 +
50603 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50604 + /* for backward compatibility, tpe_invert always defaults to on if
50605 + enabled in the kernel
50606 + */
50607 + grsec_enable_tpe_invert = 1;
50608 +#endif
50609 +
50610 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50611 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50612 + grsec_lock = 1;
50613 +#endif
50614 +
50615 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50616 + grsec_enable_audit_textrel = 1;
50617 +#endif
50618 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50619 + grsec_enable_log_rwxmaps = 1;
50620 +#endif
50621 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50622 + grsec_enable_group = 1;
50623 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50624 +#endif
50625 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50626 + grsec_enable_chdir = 1;
50627 +#endif
50628 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50629 + grsec_enable_harden_ptrace = 1;
50630 +#endif
50631 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50632 + grsec_enable_mount = 1;
50633 +#endif
50634 +#ifdef CONFIG_GRKERNSEC_LINK
50635 + grsec_enable_link = 1;
50636 +#endif
50637 +#ifdef CONFIG_GRKERNSEC_DMESG
50638 + grsec_enable_dmesg = 1;
50639 +#endif
50640 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50641 + grsec_enable_blackhole = 1;
50642 + grsec_lastack_retries = 4;
50643 +#endif
50644 +#ifdef CONFIG_GRKERNSEC_FIFO
50645 + grsec_enable_fifo = 1;
50646 +#endif
50647 +#ifdef CONFIG_GRKERNSEC_EXECVE
50648 + grsec_enable_execve = 1;
50649 +#endif
50650 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50651 + grsec_enable_execlog = 1;
50652 +#endif
50653 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50654 + grsec_enable_signal = 1;
50655 +#endif
50656 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50657 + grsec_enable_forkfail = 1;
50658 +#endif
50659 +#ifdef CONFIG_GRKERNSEC_TIME
50660 + grsec_enable_time = 1;
50661 +#endif
50662 +#ifdef CONFIG_GRKERNSEC_RESLOG
50663 + grsec_resource_logging = 1;
50664 +#endif
50665 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50666 + grsec_enable_chroot_findtask = 1;
50667 +#endif
50668 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50669 + grsec_enable_chroot_unix = 1;
50670 +#endif
50671 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50672 + grsec_enable_chroot_mount = 1;
50673 +#endif
50674 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50675 + grsec_enable_chroot_fchdir = 1;
50676 +#endif
50677 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50678 + grsec_enable_chroot_shmat = 1;
50679 +#endif
50680 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50681 + grsec_enable_audit_ptrace = 1;
50682 +#endif
50683 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50684 + grsec_enable_chroot_double = 1;
50685 +#endif
50686 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50687 + grsec_enable_chroot_pivot = 1;
50688 +#endif
50689 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50690 + grsec_enable_chroot_chdir = 1;
50691 +#endif
50692 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50693 + grsec_enable_chroot_chmod = 1;
50694 +#endif
50695 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50696 + grsec_enable_chroot_mknod = 1;
50697 +#endif
50698 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50699 + grsec_enable_chroot_nice = 1;
50700 +#endif
50701 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50702 + grsec_enable_chroot_execlog = 1;
50703 +#endif
50704 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50705 + grsec_enable_chroot_caps = 1;
50706 +#endif
50707 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50708 + grsec_enable_chroot_sysctl = 1;
50709 +#endif
50710 +#ifdef CONFIG_GRKERNSEC_TPE
50711 + grsec_enable_tpe = 1;
50712 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50713 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50714 + grsec_enable_tpe_all = 1;
50715 +#endif
50716 +#endif
50717 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50718 + grsec_enable_socket_all = 1;
50719 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50720 +#endif
50721 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50722 + grsec_enable_socket_client = 1;
50723 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50724 +#endif
50725 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50726 + grsec_enable_socket_server = 1;
50727 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50728 +#endif
50729 +#endif
50730 +
50731 + return;
50732 +}
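
[Editor's sketch] grsecurity_init above switches on every compiled-in feature when there is no sysctl interface (or when CONFIG_GRKERNSEC_SYSCTL_ON is set) and, without sysctl support at all, also sets grsec_lock so the configuration cannot be altered at runtime. A small sketch of that one-way lock semantic, with made-up names (tunable, set_tunable) standing in for the real sysctl handlers:

#include <stdio.h>

static int tunable, lock;

static int set_tunable(int val)
{
        if (lock)
                return -1;              /* locked: refuse further changes */
        tunable = val;
        return 0;
}

int main(void)
{
        set_tunable(1);
        lock = 1;                       /* one-way latch */
        printf("second set %s\n", set_tunable(0) ? "refused" : "applied");
        return 0;
}
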
50733 diff -urNp linux-2.6.32.41/grsecurity/grsec_link.c linux-2.6.32.41/grsecurity/grsec_link.c
50734 --- linux-2.6.32.41/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50735 +++ linux-2.6.32.41/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50736 @@ -0,0 +1,43 @@
50737 +#include <linux/kernel.h>
50738 +#include <linux/sched.h>
50739 +#include <linux/fs.h>
50740 +#include <linux/file.h>
50741 +#include <linux/grinternal.h>
50742 +
50743 +int
50744 +gr_handle_follow_link(const struct inode *parent,
50745 + const struct inode *inode,
50746 + const struct dentry *dentry, const struct vfsmount *mnt)
50747 +{
50748 +#ifdef CONFIG_GRKERNSEC_LINK
50749 + const struct cred *cred = current_cred();
50750 +
50751 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50752 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50753 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50754 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50755 + return -EACCES;
50756 + }
50757 +#endif
50758 + return 0;
50759 +}
50760 +
50761 +int
50762 +gr_handle_hardlink(const struct dentry *dentry,
50763 + const struct vfsmount *mnt,
50764 + struct inode *inode, const int mode, const char *to)
50765 +{
50766 +#ifdef CONFIG_GRKERNSEC_LINK
50767 + const struct cred *cred = current_cred();
50768 +
50769 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50770 + (!S_ISREG(mode) || (mode & S_ISUID) ||
50771 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50772 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50773 + !capable(CAP_FOWNER) && cred->uid) {
50774 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50775 + return -EPERM;
50776 + }
50777 +#endif
50778 + return 0;
50779 +}
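
[Editor's sketch] The link restrictions above closely resemble what mainline later shipped as fs.protected_symlinks/fs.protected_hardlinks: following a symlink in a sticky, world-writable directory is refused when the link belongs to neither the directory owner nor the process's fsuid. The follow-side predicate, restated standalone (the enable flag and S_ISLNK check are omitted):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool follow_denied(mode_t parent_mode, uid_t parent_uid,
                          uid_t link_uid, uid_t fsuid)
{
        return (parent_mode & S_ISVTX) && (parent_mode & S_IWOTH) &&
               parent_uid != link_uid && fsuid != link_uid;
}

int main(void)
{
        /* link owned by uid 1001 in a sticky world-writable dir owned by
         * root, followed by fsuid 1000: refused */
        printf("denied: %d\n",
               follow_denied(S_ISVTX | S_IWOTH | 0755, 0, 1001, 1000));
        return 0;
}
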
50780 diff -urNp linux-2.6.32.41/grsecurity/grsec_log.c linux-2.6.32.41/grsecurity/grsec_log.c
50781 --- linux-2.6.32.41/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50782 +++ linux-2.6.32.41/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50783 @@ -0,0 +1,310 @@
50784 +#include <linux/kernel.h>
50785 +#include <linux/sched.h>
50786 +#include <linux/file.h>
50787 +#include <linux/tty.h>
50788 +#include <linux/fs.h>
50789 +#include <linux/grinternal.h>
50790 +
50791 +#ifdef CONFIG_TREE_PREEMPT_RCU
50792 +#define DISABLE_PREEMPT() preempt_disable()
50793 +#define ENABLE_PREEMPT() preempt_enable()
50794 +#else
50795 +#define DISABLE_PREEMPT()
50796 +#define ENABLE_PREEMPT()
50797 +#endif
50798 +
50799 +#define BEGIN_LOCKS(x) \
50800 + DISABLE_PREEMPT(); \
50801 + rcu_read_lock(); \
50802 + read_lock(&tasklist_lock); \
50803 + read_lock(&grsec_exec_file_lock); \
50804 + if (x != GR_DO_AUDIT) \
50805 + spin_lock(&grsec_alert_lock); \
50806 + else \
50807 + spin_lock(&grsec_audit_lock)
50808 +
50809 +#define END_LOCKS(x) \
50810 + if (x != GR_DO_AUDIT) \
50811 + spin_unlock(&grsec_alert_lock); \
50812 + else \
50813 + spin_unlock(&grsec_audit_lock); \
50814 + read_unlock(&grsec_exec_file_lock); \
50815 + read_unlock(&tasklist_lock); \
50816 + rcu_read_unlock(); \
50817 + ENABLE_PREEMPT(); \
50818 + if (x == GR_DONT_AUDIT) \
50819 + gr_handle_alertkill(current)
50820 +
50821 +enum {
50822 + FLOODING,
50823 + NO_FLOODING
50824 +};
50825 +
50826 +extern char *gr_alert_log_fmt;
50827 +extern char *gr_audit_log_fmt;
50828 +extern char *gr_alert_log_buf;
50829 +extern char *gr_audit_log_buf;
50830 +
50831 +static int gr_log_start(int audit)
50832 +{
50833 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50834 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50835 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50836 +
50837 + if (audit == GR_DO_AUDIT)
50838 + goto set_fmt;
50839 +
50840 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50841 + grsec_alert_wtime = jiffies;
50842 + grsec_alert_fyet = 0;
50843 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50844 + grsec_alert_fyet++;
50845 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50846 + grsec_alert_wtime = jiffies;
50847 + grsec_alert_fyet++;
50848 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50849 + return FLOODING;
50850 + } else return FLOODING;
50851 +
50852 +set_fmt:
50853 + memset(buf, 0, PAGE_SIZE);
50854 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
50855 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50856 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50857 + } else if (current->signal->curr_ip) {
50858 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50859 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50860 + } else if (gr_acl_is_enabled()) {
50861 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50862 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50863 + } else {
50864 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
50865 + strcpy(buf, fmt);
50866 + }
50867 +
50868 + return NO_FLOODING;
50869 +}
50870 +
50871 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50872 + __attribute__ ((format (printf, 2, 0)));
50873 +
50874 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50875 +{
50876 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50877 + unsigned int len = strlen(buf);
50878 +
50879 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50880 +
50881 + return;
50882 +}
50883 +
50884 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50885 + __attribute__ ((format (printf, 2, 3)));
50886 +
50887 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50888 +{
50889 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50890 + unsigned int len = strlen(buf);
50891 + va_list ap;
50892 +
50893 + va_start(ap, msg);
50894 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50895 + va_end(ap);
50896 +
50897 + return;
50898 +}
50899 +
50900 +static void gr_log_end(int audit)
50901 +{
50902 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50903 + unsigned int len = strlen(buf);
50904 +
50905 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
50906 + printk("%s\n", buf);
50907 +
50908 + return;
50909 +}
50910 +
50911 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
50912 +{
50913 + int logtype;
50914 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
50915 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
50916 + void *voidptr = NULL;
50917 + int num1 = 0, num2 = 0;
50918 + unsigned long ulong1 = 0, ulong2 = 0;
50919 + struct dentry *dentry = NULL;
50920 + struct vfsmount *mnt = NULL;
50921 + struct file *file = NULL;
50922 + struct task_struct *task = NULL;
50923 + const struct cred *cred, *pcred;
50924 + va_list ap;
50925 +
50926 + BEGIN_LOCKS(audit);
50927 + logtype = gr_log_start(audit);
50928 + if (logtype == FLOODING) {
50929 + END_LOCKS(audit);
50930 + return;
50931 + }
50932 + va_start(ap, argtypes);
50933 + switch (argtypes) {
50934 + case GR_TTYSNIFF:
50935 + task = va_arg(ap, struct task_struct *);
50936 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
50937 + break;
50938 + case GR_SYSCTL_HIDDEN:
50939 + str1 = va_arg(ap, char *);
50940 + gr_log_middle_varargs(audit, msg, result, str1);
50941 + break;
50942 + case GR_RBAC:
50943 + dentry = va_arg(ap, struct dentry *);
50944 + mnt = va_arg(ap, struct vfsmount *);
50945 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
50946 + break;
50947 + case GR_RBAC_STR:
50948 + dentry = va_arg(ap, struct dentry *);
50949 + mnt = va_arg(ap, struct vfsmount *);
50950 + str1 = va_arg(ap, char *);
50951 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
50952 + break;
50953 + case GR_STR_RBAC:
50954 + str1 = va_arg(ap, char *);
50955 + dentry = va_arg(ap, struct dentry *);
50956 + mnt = va_arg(ap, struct vfsmount *);
50957 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
50958 + break;
50959 + case GR_RBAC_MODE2:
50960 + dentry = va_arg(ap, struct dentry *);
50961 + mnt = va_arg(ap, struct vfsmount *);
50962 + str1 = va_arg(ap, char *);
50963 + str2 = va_arg(ap, char *);
50964 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
50965 + break;
50966 + case GR_RBAC_MODE3:
50967 + dentry = va_arg(ap, struct dentry *);
50968 + mnt = va_arg(ap, struct vfsmount *);
50969 + str1 = va_arg(ap, char *);
50970 + str2 = va_arg(ap, char *);
50971 + str3 = va_arg(ap, char *);
50972 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
50973 + break;
50974 + case GR_FILENAME:
50975 + dentry = va_arg(ap, struct dentry *);
50976 + mnt = va_arg(ap, struct vfsmount *);
50977 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
50978 + break;
50979 + case GR_STR_FILENAME:
50980 + str1 = va_arg(ap, char *);
50981 + dentry = va_arg(ap, struct dentry *);
50982 + mnt = va_arg(ap, struct vfsmount *);
50983 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
50984 + break;
50985 + case GR_FILENAME_STR:
50986 + dentry = va_arg(ap, struct dentry *);
50987 + mnt = va_arg(ap, struct vfsmount *);
50988 + str1 = va_arg(ap, char *);
50989 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
50990 + break;
50991 + case GR_FILENAME_TWO_INT:
50992 + dentry = va_arg(ap, struct dentry *);
50993 + mnt = va_arg(ap, struct vfsmount *);
50994 + num1 = va_arg(ap, int);
50995 + num2 = va_arg(ap, int);
50996 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
50997 + break;
50998 + case GR_FILENAME_TWO_INT_STR:
50999 + dentry = va_arg(ap, struct dentry *);
51000 + mnt = va_arg(ap, struct vfsmount *);
51001 + num1 = va_arg(ap, int);
51002 + num2 = va_arg(ap, int);
51003 + str1 = va_arg(ap, char *);
51004 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51005 + break;
51006 + case GR_TEXTREL:
51007 + file = va_arg(ap, struct file *);
51008 + ulong1 = va_arg(ap, unsigned long);
51009 + ulong2 = va_arg(ap, unsigned long);
51010 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51011 + break;
51012 + case GR_PTRACE:
51013 + task = va_arg(ap, struct task_struct *);
51014 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51015 + break;
51016 + case GR_RESOURCE:
51017 + task = va_arg(ap, struct task_struct *);
51018 + cred = __task_cred(task);
51019 + pcred = __task_cred(task->real_parent);
51020 + ulong1 = va_arg(ap, unsigned long);
51021 + str1 = va_arg(ap, char *);
51022 + ulong2 = va_arg(ap, unsigned long);
51023 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51024 + break;
51025 + case GR_CAP:
51026 + task = va_arg(ap, struct task_struct *);
51027 + cred = __task_cred(task);
51028 + pcred = __task_cred(task->real_parent);
51029 + str1 = va_arg(ap, char *);
51030 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51031 + break;
51032 + case GR_SIG:
51033 + str1 = va_arg(ap, char *);
51034 + voidptr = va_arg(ap, void *);
51035 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51036 + break;
51037 + case GR_SIG2:
51038 + task = va_arg(ap, struct task_struct *);
51039 + cred = __task_cred(task);
51040 + pcred = __task_cred(task->real_parent);
51041 + num1 = va_arg(ap, int);
51042 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51043 + break;
51044 + case GR_CRASH1:
51045 + task = va_arg(ap, struct task_struct *);
51046 + cred = __task_cred(task);
51047 + pcred = __task_cred(task->real_parent);
51048 + ulong1 = va_arg(ap, unsigned long);
51049 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51050 + break;
51051 + case GR_CRASH2:
51052 + task = va_arg(ap, struct task_struct *);
51053 + cred = __task_cred(task);
51054 + pcred = __task_cred(task->real_parent);
51055 + ulong1 = va_arg(ap, unsigned long);
51056 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51057 + break;
51058 + case GR_RWXMAP:
51059 + file = va_arg(ap, struct file *);
51060 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51061 + break;
51062 + case GR_PSACCT:
51063 + {
51064 + unsigned int wday, cday;
51065 + __u8 whr, chr;
51066 + __u8 wmin, cmin;
51067 + __u8 wsec, csec;
51068 + char cur_tty[64] = { 0 };
51069 + char parent_tty[64] = { 0 };
51070 +
51071 + task = va_arg(ap, struct task_struct *);
51072 + wday = va_arg(ap, unsigned int);
51073 + cday = va_arg(ap, unsigned int);
51074 + whr = va_arg(ap, int);
51075 + chr = va_arg(ap, int);
51076 + wmin = va_arg(ap, int);
51077 + cmin = va_arg(ap, int);
51078 + wsec = va_arg(ap, int);
51079 + csec = va_arg(ap, int);
51080 + ulong1 = va_arg(ap, unsigned long);
51081 + cred = __task_cred(task);
51082 + pcred = __task_cred(task->real_parent);
51083 +
51084 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51085 + }
51086 + break;
51087 + default:
51088 + gr_log_middle(audit, msg, ap);
51089 + }
51090 + va_end(ap);
51091 + gr_log_end(audit);
51092 + END_LOCKS(audit);
51093 +}
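
[Editor's sketch] gr_log_start above rate-limits alerts: within a CONFIG_GRKERNSEC_FLOODTIME window at most CONFIG_GRKERNSEC_FLOODBURST messages get through, after which logging stays off until a full quiet window has elapsed. A userspace sketch of the same burst limiter, with illustrative constants (WINDOW, BURST) in place of the kernel config values and wall-clock time in place of jiffies:

#include <stdio.h>
#include <time.h>

#define WINDOW 10
#define BURST  6

static time_t wtime;
static unsigned int fyet;

static int may_log(void)
{
        time_t now = time(NULL);

        if (!wtime || now - wtime > WINDOW) {
                wtime = now;            /* start of a new window */
                fyet = 0;
                return 1;
        }
        if (fyet < BURST) {
                fyet++;
                return 1;
        }
        if (fyet == BURST) {            /* announce the suppression once */
                wtime = now;
                fyet++;
                fprintf(stderr, "logging disabled for %d seconds\n", WINDOW);
        }
        return 0;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                if (may_log())
                        printf("alert %d\n", i);
        return 0;
}
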
51094 diff -urNp linux-2.6.32.41/grsecurity/grsec_mem.c linux-2.6.32.41/grsecurity/grsec_mem.c
51095 --- linux-2.6.32.41/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51096 +++ linux-2.6.32.41/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51097 @@ -0,0 +1,33 @@
51098 +#include <linux/kernel.h>
51099 +#include <linux/sched.h>
51100 +#include <linux/mm.h>
51101 +#include <linux/mman.h>
51102 +#include <linux/grinternal.h>
51103 +
51104 +void
51105 +gr_handle_ioperm(void)
51106 +{
51107 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51108 + return;
51109 +}
51110 +
51111 +void
51112 +gr_handle_iopl(void)
51113 +{
51114 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51115 + return;
51116 +}
51117 +
51118 +void
51119 +gr_handle_mem_readwrite(u64 from, u64 to)
51120 +{
51121 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51122 + return;
51123 +}
51124 +
51125 +void
51126 +gr_handle_vm86(void)
51127 +{
51128 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51129 + return;
51130 +}
51131 diff -urNp linux-2.6.32.41/grsecurity/grsec_mount.c linux-2.6.32.41/grsecurity/grsec_mount.c
51132 --- linux-2.6.32.41/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51133 +++ linux-2.6.32.41/grsecurity/grsec_mount.c 2011-04-17 15:56:46.000000000 -0400
51134 @@ -0,0 +1,62 @@
51135 +#include <linux/kernel.h>
51136 +#include <linux/sched.h>
51137 +#include <linux/mount.h>
51138 +#include <linux/grsecurity.h>
51139 +#include <linux/grinternal.h>
51140 +
51141 +void
51142 +gr_log_remount(const char *devname, const int retval)
51143 +{
51144 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51145 + if (grsec_enable_mount && (retval >= 0))
51146 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51147 +#endif
51148 + return;
51149 +}
51150 +
51151 +void
51152 +gr_log_unmount(const char *devname, const int retval)
51153 +{
51154 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51155 + if (grsec_enable_mount && (retval >= 0))
51156 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51157 +#endif
51158 + return;
51159 +}
51160 +
51161 +void
51162 +gr_log_mount(const char *from, const char *to, const int retval)
51163 +{
51164 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51165 + if (grsec_enable_mount && (retval >= 0))
51166 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
51167 +#endif
51168 + return;
51169 +}
51170 +
51171 +int
51172 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51173 +{
51174 +#ifdef CONFIG_GRKERNSEC_ROFS
51175 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51176 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51177 + return -EPERM;
51178 + } else
51179 + return 0;
51180 +#endif
51181 + return 0;
51182 +}
51183 +
51184 +int
51185 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51186 +{
51187 +#ifdef CONFIG_GRKERNSEC_ROFS
51188 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51189 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51190 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51191 + return -EPERM;
51192 + } else
51193 + return 0;
51194 +#endif
51195 + return 0;
51196 +}
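
[Editor's sketch] The ROFS hooks above refuse new writable mounts and writes to block devices while the feature is enabled. The two predicates, restated standalone (boolean parameters stand in for the mount flags and inode checks):

#include <stdbool.h>
#include <stdio.h>

static bool rofs_mount_denied(bool rofs_enabled, bool mnt_readonly)
{
        return rofs_enabled && !mnt_readonly;
}

static bool rofs_blockwrite_denied(bool rofs_enabled, bool is_blockdev,
                                   bool want_write)
{
        return rofs_enabled && is_blockdev && want_write;
}

int main(void)
{
        printf("rw mount denied: %d, blockdev write denied: %d\n",
               rofs_mount_denied(true, false),
               rofs_blockwrite_denied(true, true, true));
        return 0;
}
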
51197 diff -urNp linux-2.6.32.41/grsecurity/grsec_pax.c linux-2.6.32.41/grsecurity/grsec_pax.c
51198 --- linux-2.6.32.41/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51199 +++ linux-2.6.32.41/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51200 @@ -0,0 +1,36 @@
51201 +#include <linux/kernel.h>
51202 +#include <linux/sched.h>
51203 +#include <linux/mm.h>
51204 +#include <linux/file.h>
51205 +#include <linux/grinternal.h>
51206 +#include <linux/grsecurity.h>
51207 +
51208 +void
51209 +gr_log_textrel(struct vm_area_struct * vma)
51210 +{
51211 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51212 + if (grsec_enable_audit_textrel)
51213 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51214 +#endif
51215 + return;
51216 +}
51217 +
51218 +void
51219 +gr_log_rwxmmap(struct file *file)
51220 +{
51221 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51222 + if (grsec_enable_log_rwxmaps)
51223 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51224 +#endif
51225 + return;
51226 +}
51227 +
51228 +void
51229 +gr_log_rwxmprotect(struct file *file)
51230 +{
51231 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51232 + if (grsec_enable_log_rwxmaps)
51233 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51234 +#endif
51235 + return;
51236 +}
51237 diff -urNp linux-2.6.32.41/grsecurity/grsec_ptrace.c linux-2.6.32.41/grsecurity/grsec_ptrace.c
51238 --- linux-2.6.32.41/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51239 +++ linux-2.6.32.41/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51240 @@ -0,0 +1,14 @@
51241 +#include <linux/kernel.h>
51242 +#include <linux/sched.h>
51243 +#include <linux/grinternal.h>
51244 +#include <linux/grsecurity.h>
51245 +
51246 +void
51247 +gr_audit_ptrace(struct task_struct *task)
51248 +{
51249 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51250 + if (grsec_enable_audit_ptrace)
51251 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51252 +#endif
51253 + return;
51254 +}
51255 diff -urNp linux-2.6.32.41/grsecurity/grsec_sig.c linux-2.6.32.41/grsecurity/grsec_sig.c
51256 --- linux-2.6.32.41/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51257 +++ linux-2.6.32.41/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51258 @@ -0,0 +1,202 @@
51259 +#include <linux/kernel.h>
51260 +#include <linux/sched.h>
51261 +#include <linux/delay.h>
51262 +#include <linux/grsecurity.h>
51263 +#include <linux/grinternal.h>
51264 +#include <linux/hardirq.h>
51265 +
51266 +char *signames[] = {
51267 + [SIGSEGV] = "Segmentation fault",
51268 + [SIGILL] = "Illegal instruction",
51269 + [SIGABRT] = "Abort",
51270 + [SIGBUS] = "Invalid alignment/Bus error"
51271 +};
51272 +
51273 +void
51274 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51275 +{
51276 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51277 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51278 + (sig == SIGABRT) || (sig == SIGBUS))) {
51279 + if (t->pid == current->pid) {
51280 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51281 + } else {
51282 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51283 + }
51284 + }
51285 +#endif
51286 + return;
51287 +}
51288 +
51289 +int
51290 +gr_handle_signal(const struct task_struct *p, const int sig)
51291 +{
51292 +#ifdef CONFIG_GRKERNSEC
51293 + if (current->pid > 1 && gr_check_protected_task(p)) {
51294 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51295 + return -EPERM;
51296 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51297 + return -EPERM;
51298 + }
51299 +#endif
51300 + return 0;
51301 +}
51302 +
51303 +#ifdef CONFIG_GRKERNSEC
51304 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51305 +
51306 +int gr_fake_force_sig(int sig, struct task_struct *t)
51307 +{
51308 + unsigned long int flags;
51309 + int ret, blocked, ignored;
51310 + struct k_sigaction *action;
51311 +
51312 + spin_lock_irqsave(&t->sighand->siglock, flags);
51313 + action = &t->sighand->action[sig-1];
51314 + ignored = action->sa.sa_handler == SIG_IGN;
51315 + blocked = sigismember(&t->blocked, sig);
51316 + if (blocked || ignored) {
51317 + action->sa.sa_handler = SIG_DFL;
51318 + if (blocked) {
51319 + sigdelset(&t->blocked, sig);
51320 + recalc_sigpending_and_wake(t);
51321 + }
51322 + }
51323 + if (action->sa.sa_handler == SIG_DFL)
51324 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51325 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51326 +
51327 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51328 +
51329 + return ret;
51330 +}
51331 +#endif
51332 +
51333 +#ifdef CONFIG_GRKERNSEC_BRUTE
51334 +#define GR_USER_BAN_TIME (15 * 60)
51335 +
51336 +static int __get_dumpable(unsigned long mm_flags)
51337 +{
51338 + int ret;
51339 +
51340 + ret = mm_flags & MMF_DUMPABLE_MASK;
51341 + return (ret >= 2) ? 2 : ret;
51342 +}
51343 +#endif
51344 +
51345 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51346 +{
51347 +#ifdef CONFIG_GRKERNSEC_BRUTE
51348 + uid_t uid = 0;
51349 +
51350 + rcu_read_lock();
51351 + read_lock(&tasklist_lock);
51352 + read_lock(&grsec_exec_file_lock);
51353 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51354 + p->real_parent->brute = 1;
51355 + else {
51356 + const struct cred *cred = __task_cred(p), *cred2;
51357 + struct task_struct *tsk, *tsk2;
51358 +
51359 + if (!__get_dumpable(mm_flags) && cred->uid) {
51360 + struct user_struct *user;
51361 +
51362 + uid = cred->uid;
51363 +
51364 + /* the reference taken by find_user() is put again (free_uid) in __gr_process_user_ban() once the ban has expired */
51365 + user = find_user(uid);
51366 + if (user == NULL)
51367 + goto unlock;
51368 + user->banned = 1;
51369 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51370 + if (user->ban_expires == ~0UL)
51371 + user->ban_expires--;
51372 +
51373 + do_each_thread(tsk2, tsk) {
51374 + cred2 = __task_cred(tsk);
51375 + if (tsk != p && cred2->uid == uid)
51376 + gr_fake_force_sig(SIGKILL, tsk);
51377 + } while_each_thread(tsk2, tsk);
51378 + }
51379 + }
51380 +unlock:
51381 + read_unlock(&grsec_exec_file_lock);
51382 + read_unlock(&tasklist_lock);
51383 + rcu_read_unlock();
51384 +
51385 + if (uid)
51386 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51387 +#endif
51388 + return;
51389 +}
51390 +
51391 +void gr_handle_brute_check(void)
51392 +{
51393 +#ifdef CONFIG_GRKERNSEC_BRUTE
51394 + if (current->brute)
51395 + msleep(30 * 1000);
51396 +#endif
51397 + return;
51398 +}
51399 +
51400 +void gr_handle_kernel_exploit(void)
51401 +{
51402 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51403 + const struct cred *cred;
51404 + struct task_struct *tsk, *tsk2;
51405 + struct user_struct *user;
51406 + uid_t uid;
51407 +
51408 + if (in_irq() || in_serving_softirq() || in_nmi())
51409 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51410 +
51411 + uid = current_uid();
51412 +
51413 + if (uid == 0)
51414 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51415 + else {
51416 + /* kill all the processes of this user, hold a reference
51417 + to their creds struct, and prevent them from creating
51418 + another process until system reset
51419 + */
51420 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51421 + /* we intentionally leak this ref */
51422 + user = get_uid(current->cred->user);
51423 + if (user) {
51424 + user->banned = 1;
51425 + user->ban_expires = ~0UL;
51426 + }
51427 +
51428 + read_lock(&tasklist_lock);
51429 + do_each_thread(tsk2, tsk) {
51430 + cred = __task_cred(tsk);
51431 + if (cred->uid == uid)
51432 + gr_fake_force_sig(SIGKILL, tsk);
51433 + } while_each_thread(tsk2, tsk);
51434 + read_unlock(&tasklist_lock);
51435 + }
51436 +#endif
51437 +}
51438 +
51439 +int __gr_process_user_ban(struct user_struct *user)
51440 +{
51441 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51442 + if (unlikely(user->banned)) {
51443 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51444 + user->banned = 0;
51445 + user->ban_expires = 0;
51446 + free_uid(user);
51447 + } else
51448 + return -EPERM;
51449 + }
51450 +#endif
51451 + return 0;
51452 +}
51453 +
51454 +int gr_process_user_ban(void)
51455 +{
51456 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51457 + return __gr_process_user_ban(current->cred->user);
51458 +#endif
51459 + return 0;
51460 +}
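
[Editor's sketch] The brute-force handler above bans an offending non-root uid for GR_USER_BAN_TIME (15 minutes) and kills that user's tasks, while __gr_process_user_ban clears the ban lazily once it has expired; ~0UL marks a permanent ban from gr_handle_kernel_exploit. A userspace sketch of just the ban bookkeeping, using time(2) in place of get_seconds():

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)

struct user {
        int banned;
        unsigned long ban_expires;      /* 0 = none, ~0UL = permanent */
};

static void ban_user(struct user *u, int permanent)
{
        u->banned = 1;
        u->ban_expires = permanent ? ~0UL
                                   : (unsigned long)time(NULL) + BAN_TIME;
}

static int user_banned(struct user *u)
{
        if (!u->banned)
                return 0;
        if (u->ban_expires != ~0UL &&
            (unsigned long)time(NULL) >= u->ban_expires) {
                u->banned = 0;          /* ban expired: clear it lazily */
                u->ban_expires = 0;
                return 0;
        }
        return 1;
}

int main(void)
{
        struct user u = { 0, 0 };

        ban_user(&u, 0);
        printf("banned now: %d\n", user_banned(&u));
        return 0;
}
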
51461 diff -urNp linux-2.6.32.41/grsecurity/grsec_sock.c linux-2.6.32.41/grsecurity/grsec_sock.c
51462 --- linux-2.6.32.41/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51463 +++ linux-2.6.32.41/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51464 @@ -0,0 +1,275 @@
51465 +#include <linux/kernel.h>
51466 +#include <linux/module.h>
51467 +#include <linux/sched.h>
51468 +#include <linux/file.h>
51469 +#include <linux/net.h>
51470 +#include <linux/in.h>
51471 +#include <linux/ip.h>
51472 +#include <net/sock.h>
51473 +#include <net/inet_sock.h>
51474 +#include <linux/grsecurity.h>
51475 +#include <linux/grinternal.h>
51476 +#include <linux/gracl.h>
51477 +
51478 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51479 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51480 +
51481 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51482 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51483 +
51484 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51485 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51486 +
51487 +#ifdef CONFIG_UNIX_MODULE
51488 +EXPORT_SYMBOL(gr_acl_handle_unix);
51489 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51490 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51491 +EXPORT_SYMBOL(gr_handle_create);
51492 +#endif
51493 +
51494 +#ifdef CONFIG_GRKERNSEC
51495 +#define gr_conn_table_size 32749
51496 +struct conn_table_entry {
51497 + struct conn_table_entry *next;
51498 + struct signal_struct *sig;
51499 +};
51500 +
51501 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51502 +DEFINE_SPINLOCK(gr_conn_table_lock);
51503 +
51504 +extern const char * gr_socktype_to_name(unsigned char type);
51505 +extern const char * gr_proto_to_name(unsigned char proto);
51506 +extern const char * gr_sockfamily_to_name(unsigned char family);
51507 +
51508 +static __inline__ int
51509 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51510 +{
51511 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51512 +}
51513 +
51514 +static __inline__ int
51515 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51516 + __u16 sport, __u16 dport)
51517 +{
51518 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51519 + sig->gr_sport == sport && sig->gr_dport == dport))
51520 + return 1;
51521 + else
51522 + return 0;
51523 +}
51524 +
51525 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51526 +{
51527 + struct conn_table_entry **match;
51528 + unsigned int index;
51529 +
51530 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51531 + sig->gr_sport, sig->gr_dport,
51532 + gr_conn_table_size);
51533 +
51534 + newent->sig = sig;
51535 +
51536 + match = &gr_conn_table[index];
51537 + newent->next = *match;
51538 + *match = newent;
51539 +
51540 + return;
51541 +}
51542 +
51543 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51544 +{
51545 + struct conn_table_entry *match, *last = NULL;
51546 + unsigned int index;
51547 +
51548 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51549 + sig->gr_sport, sig->gr_dport,
51550 + gr_conn_table_size);
51551 +
51552 + match = gr_conn_table[index];
51553 + while (match && !conn_match(match->sig,
51554 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51555 + sig->gr_dport)) {
51556 + last = match;
51557 + match = match->next;
51558 + }
51559 +
51560 + if (match) {
51561 + if (last)
51562 + last->next = match->next;
51563 + else
51564 + gr_conn_table[index] = NULL;
51565 + kfree(match);
51566 + }
51567 +
51568 + return;
51569 +}
51570 +
51571 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51572 + __u16 sport, __u16 dport)
51573 +{
51574 + struct conn_table_entry *match;
51575 + unsigned int index;
51576 +
51577 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51578 +
51579 + match = gr_conn_table[index];
51580 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51581 + match = match->next;
51582 +
51583 + if (match)
51584 + return match->sig;
51585 + else
51586 + return NULL;
51587 +}
51588 +
51589 +#endif
51590 +
51591 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51592 +{
51593 +#ifdef CONFIG_GRKERNSEC
51594 + struct signal_struct *sig = task->signal;
51595 + struct conn_table_entry *newent;
51596 +
51597 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51598 + if (newent == NULL)
51599 + return;
51600 + /* no bh lock needed since we are called with bh disabled */
51601 + spin_lock(&gr_conn_table_lock);
51602 + gr_del_task_from_ip_table_nolock(sig);
51603 + sig->gr_saddr = inet->rcv_saddr;
51604 + sig->gr_daddr = inet->daddr;
51605 + sig->gr_sport = inet->sport;
51606 + sig->gr_dport = inet->dport;
51607 + gr_add_to_task_ip_table_nolock(sig, newent);
51608 + spin_unlock(&gr_conn_table_lock);
51609 +#endif
51610 + return;
51611 +}
51612 +
51613 +void gr_del_task_from_ip_table(struct task_struct *task)
51614 +{
51615 +#ifdef CONFIG_GRKERNSEC
51616 + spin_lock_bh(&gr_conn_table_lock);
51617 + gr_del_task_from_ip_table_nolock(task->signal);
51618 + spin_unlock_bh(&gr_conn_table_lock);
51619 +#endif
51620 + return;
51621 +}
51622 +
51623 +void
51624 +gr_attach_curr_ip(const struct sock *sk)
51625 +{
51626 +#ifdef CONFIG_GRKERNSEC
51627 + struct signal_struct *p, *set;
51628 + const struct inet_sock *inet = inet_sk(sk);
51629 +
51630 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51631 + return;
51632 +
51633 + set = current->signal;
51634 +
51635 + spin_lock_bh(&gr_conn_table_lock);
51636 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51637 + inet->dport, inet->sport);
51638 + if (unlikely(p != NULL)) {
51639 + set->curr_ip = p->curr_ip;
51640 + set->used_accept = 1;
51641 + gr_del_task_from_ip_table_nolock(p);
51642 + spin_unlock_bh(&gr_conn_table_lock);
51643 + return;
51644 + }
51645 + spin_unlock_bh(&gr_conn_table_lock);
51646 +
51647 + set->curr_ip = inet->daddr;
51648 + set->used_accept = 1;
51649 +#endif
51650 + return;
51651 +}
51652 +
51653 +int
51654 +gr_handle_sock_all(const int family, const int type, const int protocol)
51655 +{
51656 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51657 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51658 + (family != AF_UNIX)) {
51659 + if (family == AF_INET)
51660 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51661 + else
51662 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51663 + return -EACCES;
51664 + }
51665 +#endif
51666 + return 0;
51667 +}
51668 +
51669 +int
51670 +gr_handle_sock_server(const struct sockaddr *sck)
51671 +{
51672 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51673 + if (grsec_enable_socket_server &&
51674 + in_group_p(grsec_socket_server_gid) &&
51675 + sck && (sck->sa_family != AF_UNIX) &&
51676 + (sck->sa_family != AF_LOCAL)) {
51677 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51678 + return -EACCES;
51679 + }
51680 +#endif
51681 + return 0;
51682 +}
51683 +
51684 +int
51685 +gr_handle_sock_server_other(const struct sock *sck)
51686 +{
51687 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51688 + if (grsec_enable_socket_server &&
51689 + in_group_p(grsec_socket_server_gid) &&
51690 + sck && (sck->sk_family != AF_UNIX) &&
51691 + (sck->sk_family != AF_LOCAL)) {
51692 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51693 + return -EACCES;
51694 + }
51695 +#endif
51696 + return 0;
51697 +}
51698 +
51699 +int
51700 +gr_handle_sock_client(const struct sockaddr *sck)
51701 +{
51702 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51703 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51704 + sck && (sck->sa_family != AF_UNIX) &&
51705 + (sck->sa_family != AF_LOCAL)) {
51706 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51707 + return -EACCES;
51708 + }
51709 +#endif
51710 + return 0;
51711 +}
51712 +
51713 +kernel_cap_t
51714 +gr_cap_rtnetlink(struct sock *sock)
51715 +{
51716 +#ifdef CONFIG_GRKERNSEC
51717 + if (!gr_acl_is_enabled())
51718 + return current_cap();
51719 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51720 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51721 + gr_is_capable(CAP_SYS_ADMIN))
51722 + return current_cap();
51723 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51724 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51725 + gr_is_capable(CAP_AUDIT_WRITE) &&
51726 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51727 + gr_is_capable(CAP_AUDIT_CONTROL))
51728 + return current_cap();
51729 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51730 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51731 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51732 + gr_is_capable(CAP_NET_ADMIN)))
51733 + return current_cap();
51734 + else
51735 + return __cap_empty_set;
51736 +#else
51737 + return current_cap();
51738 +#endif
51739 +}
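
[Editor's note: the sketch below is not part of the patch. It illustrates the
expected call pattern for the gr_handle_sock_*() hooks defined above, assuming
they are invoked early in the socket-creation path; the wrapper name is
hypothetical.]

	#include <linux/grsecurity.h>

	/* Hypothetical caller: reject the socket before anything is allocated
	 * when the GID-based policy denies it; the hook returns 0 or -EACCES. */
	static int example_socket_policy_check(int family, int type, int protocol)
	{
		int err = gr_handle_sock_all(family, type, protocol);

		if (err)
			return err;	/* -EACCES propagates back to userspace */
		return 0;
	}
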
51740 diff -urNp linux-2.6.32.41/grsecurity/grsec_sysctl.c linux-2.6.32.41/grsecurity/grsec_sysctl.c
51741 --- linux-2.6.32.41/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51742 +++ linux-2.6.32.41/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51743 @@ -0,0 +1,479 @@
51744 +#include <linux/kernel.h>
51745 +#include <linux/sched.h>
51746 +#include <linux/sysctl.h>
51747 +#include <linux/grsecurity.h>
51748 +#include <linux/grinternal.h>
51749 +
51750 +int
51751 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51752 +{
51753 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51754 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51755 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51756 + return -EACCES;
51757 + }
51758 +#endif
51759 + return 0;
51760 +}
51761 +
51762 +#ifdef CONFIG_GRKERNSEC_ROFS
51763 +static int __maybe_unused one = 1;
51764 +#endif
51765 +
51766 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51767 +ctl_table grsecurity_table[] = {
51768 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51769 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51770 +#ifdef CONFIG_GRKERNSEC_IO
51771 + {
51772 + .ctl_name = CTL_UNNUMBERED,
51773 + .procname = "disable_priv_io",
51774 + .data = &grsec_disable_privio,
51775 + .maxlen = sizeof(int),
51776 + .mode = 0600,
51777 + .proc_handler = &proc_dointvec,
51778 + },
51779 +#endif
51780 +#endif
51781 +#ifdef CONFIG_GRKERNSEC_LINK
51782 + {
51783 + .ctl_name = CTL_UNNUMBERED,
51784 + .procname = "linking_restrictions",
51785 + .data = &grsec_enable_link,
51786 + .maxlen = sizeof(int),
51787 + .mode = 0600,
51788 + .proc_handler = &proc_dointvec,
51789 + },
51790 +#endif
51791 +#ifdef CONFIG_GRKERNSEC_FIFO
51792 + {
51793 + .ctl_name = CTL_UNNUMBERED,
51794 + .procname = "fifo_restrictions",
51795 + .data = &grsec_enable_fifo,
51796 + .maxlen = sizeof(int),
51797 + .mode = 0600,
51798 + .proc_handler = &proc_dointvec,
51799 + },
51800 +#endif
51801 +#ifdef CONFIG_GRKERNSEC_EXECVE
51802 + {
51803 + .ctl_name = CTL_UNNUMBERED,
51804 + .procname = "execve_limiting",
51805 + .data = &grsec_enable_execve,
51806 + .maxlen = sizeof(int),
51807 + .mode = 0600,
51808 + .proc_handler = &proc_dointvec,
51809 + },
51810 +#endif
51811 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51812 + {
51813 + .ctl_name = CTL_UNNUMBERED,
51814 + .procname = "ip_blackhole",
51815 + .data = &grsec_enable_blackhole,
51816 + .maxlen = sizeof(int),
51817 + .mode = 0600,
51818 + .proc_handler = &proc_dointvec,
51819 + },
51820 + {
51821 + .ctl_name = CTL_UNNUMBERED,
51822 + .procname = "lastack_retries",
51823 + .data = &grsec_lastack_retries,
51824 + .maxlen = sizeof(int),
51825 + .mode = 0600,
51826 + .proc_handler = &proc_dointvec,
51827 + },
51828 +#endif
51829 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51830 + {
51831 + .ctl_name = CTL_UNNUMBERED,
51832 + .procname = "exec_logging",
51833 + .data = &grsec_enable_execlog,
51834 + .maxlen = sizeof(int),
51835 + .mode = 0600,
51836 + .proc_handler = &proc_dointvec,
51837 + },
51838 +#endif
51839 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51840 + {
51841 + .ctl_name = CTL_UNNUMBERED,
51842 + .procname = "rwxmap_logging",
51843 + .data = &grsec_enable_log_rwxmaps,
51844 + .maxlen = sizeof(int),
51845 + .mode = 0600,
51846 + .proc_handler = &proc_dointvec,
51847 + },
51848 +#endif
51849 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51850 + {
51851 + .ctl_name = CTL_UNNUMBERED,
51852 + .procname = "signal_logging",
51853 + .data = &grsec_enable_signal,
51854 + .maxlen = sizeof(int),
51855 + .mode = 0600,
51856 + .proc_handler = &proc_dointvec,
51857 + },
51858 +#endif
51859 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51860 + {
51861 + .ctl_name = CTL_UNNUMBERED,
51862 + .procname = "forkfail_logging",
51863 + .data = &grsec_enable_forkfail,
51864 + .maxlen = sizeof(int),
51865 + .mode = 0600,
51866 + .proc_handler = &proc_dointvec,
51867 + },
51868 +#endif
51869 +#ifdef CONFIG_GRKERNSEC_TIME
51870 + {
51871 + .ctl_name = CTL_UNNUMBERED,
51872 + .procname = "timechange_logging",
51873 + .data = &grsec_enable_time,
51874 + .maxlen = sizeof(int),
51875 + .mode = 0600,
51876 + .proc_handler = &proc_dointvec,
51877 + },
51878 +#endif
51879 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51880 + {
51881 + .ctl_name = CTL_UNNUMBERED,
51882 + .procname = "chroot_deny_shmat",
51883 + .data = &grsec_enable_chroot_shmat,
51884 + .maxlen = sizeof(int),
51885 + .mode = 0600,
51886 + .proc_handler = &proc_dointvec,
51887 + },
51888 +#endif
51889 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51890 + {
51891 + .ctl_name = CTL_UNNUMBERED,
51892 + .procname = "chroot_deny_unix",
51893 + .data = &grsec_enable_chroot_unix,
51894 + .maxlen = sizeof(int),
51895 + .mode = 0600,
51896 + .proc_handler = &proc_dointvec,
51897 + },
51898 +#endif
51899 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51900 + {
51901 + .ctl_name = CTL_UNNUMBERED,
51902 + .procname = "chroot_deny_mount",
51903 + .data = &grsec_enable_chroot_mount,
51904 + .maxlen = sizeof(int),
51905 + .mode = 0600,
51906 + .proc_handler = &proc_dointvec,
51907 + },
51908 +#endif
51909 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51910 + {
51911 + .ctl_name = CTL_UNNUMBERED,
51912 + .procname = "chroot_deny_fchdir",
51913 + .data = &grsec_enable_chroot_fchdir,
51914 + .maxlen = sizeof(int),
51915 + .mode = 0600,
51916 + .proc_handler = &proc_dointvec,
51917 + },
51918 +#endif
51919 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51920 + {
51921 + .ctl_name = CTL_UNNUMBERED,
51922 + .procname = "chroot_deny_chroot",
51923 + .data = &grsec_enable_chroot_double,
51924 + .maxlen = sizeof(int),
51925 + .mode = 0600,
51926 + .proc_handler = &proc_dointvec,
51927 + },
51928 +#endif
51929 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51930 + {
51931 + .ctl_name = CTL_UNNUMBERED,
51932 + .procname = "chroot_deny_pivot",
51933 + .data = &grsec_enable_chroot_pivot,
51934 + .maxlen = sizeof(int),
51935 + .mode = 0600,
51936 + .proc_handler = &proc_dointvec,
51937 + },
51938 +#endif
51939 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51940 + {
51941 + .ctl_name = CTL_UNNUMBERED,
51942 + .procname = "chroot_enforce_chdir",
51943 + .data = &grsec_enable_chroot_chdir,
51944 + .maxlen = sizeof(int),
51945 + .mode = 0600,
51946 + .proc_handler = &proc_dointvec,
51947 + },
51948 +#endif
51949 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51950 + {
51951 + .ctl_name = CTL_UNNUMBERED,
51952 + .procname = "chroot_deny_chmod",
51953 + .data = &grsec_enable_chroot_chmod,
51954 + .maxlen = sizeof(int),
51955 + .mode = 0600,
51956 + .proc_handler = &proc_dointvec,
51957 + },
51958 +#endif
51959 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51960 + {
51961 + .ctl_name = CTL_UNNUMBERED,
51962 + .procname = "chroot_deny_mknod",
51963 + .data = &grsec_enable_chroot_mknod,
51964 + .maxlen = sizeof(int),
51965 + .mode = 0600,
51966 + .proc_handler = &proc_dointvec,
51967 + },
51968 +#endif
51969 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51970 + {
51971 + .ctl_name = CTL_UNNUMBERED,
51972 + .procname = "chroot_restrict_nice",
51973 + .data = &grsec_enable_chroot_nice,
51974 + .maxlen = sizeof(int),
51975 + .mode = 0600,
51976 + .proc_handler = &proc_dointvec,
51977 + },
51978 +#endif
51979 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51980 + {
51981 + .ctl_name = CTL_UNNUMBERED,
51982 + .procname = "chroot_execlog",
51983 + .data = &grsec_enable_chroot_execlog,
51984 + .maxlen = sizeof(int),
51985 + .mode = 0600,
51986 + .proc_handler = &proc_dointvec,
51987 + },
51988 +#endif
51989 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51990 + {
51991 + .ctl_name = CTL_UNNUMBERED,
51992 + .procname = "chroot_caps",
51993 + .data = &grsec_enable_chroot_caps,
51994 + .maxlen = sizeof(int),
51995 + .mode = 0600,
51996 + .proc_handler = &proc_dointvec,
51997 + },
51998 +#endif
51999 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52000 + {
52001 + .ctl_name = CTL_UNNUMBERED,
52002 + .procname = "chroot_deny_sysctl",
52003 + .data = &grsec_enable_chroot_sysctl,
52004 + .maxlen = sizeof(int),
52005 + .mode = 0600,
52006 + .proc_handler = &proc_dointvec,
52007 + },
52008 +#endif
52009 +#ifdef CONFIG_GRKERNSEC_TPE
52010 + {
52011 + .ctl_name = CTL_UNNUMBERED,
52012 + .procname = "tpe",
52013 + .data = &grsec_enable_tpe,
52014 + .maxlen = sizeof(int),
52015 + .mode = 0600,
52016 + .proc_handler = &proc_dointvec,
52017 + },
52018 + {
52019 + .ctl_name = CTL_UNNUMBERED,
52020 + .procname = "tpe_gid",
52021 + .data = &grsec_tpe_gid,
52022 + .maxlen = sizeof(int),
52023 + .mode = 0600,
52024 + .proc_handler = &proc_dointvec,
52025 + },
52026 +#endif
52027 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52028 + {
52029 + .ctl_name = CTL_UNNUMBERED,
52030 + .procname = "tpe_invert",
52031 + .data = &grsec_enable_tpe_invert,
52032 + .maxlen = sizeof(int),
52033 + .mode = 0600,
52034 + .proc_handler = &proc_dointvec,
52035 + },
52036 +#endif
52037 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52038 + {
52039 + .ctl_name = CTL_UNNUMBERED,
52040 + .procname = "tpe_restrict_all",
52041 + .data = &grsec_enable_tpe_all,
52042 + .maxlen = sizeof(int),
52043 + .mode = 0600,
52044 + .proc_handler = &proc_dointvec,
52045 + },
52046 +#endif
52047 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52048 + {
52049 + .ctl_name = CTL_UNNUMBERED,
52050 + .procname = "socket_all",
52051 + .data = &grsec_enable_socket_all,
52052 + .maxlen = sizeof(int),
52053 + .mode = 0600,
52054 + .proc_handler = &proc_dointvec,
52055 + },
52056 + {
52057 + .ctl_name = CTL_UNNUMBERED,
52058 + .procname = "socket_all_gid",
52059 + .data = &grsec_socket_all_gid,
52060 + .maxlen = sizeof(int),
52061 + .mode = 0600,
52062 + .proc_handler = &proc_dointvec,
52063 + },
52064 +#endif
52065 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52066 + {
52067 + .ctl_name = CTL_UNNUMBERED,
52068 + .procname = "socket_client",
52069 + .data = &grsec_enable_socket_client,
52070 + .maxlen = sizeof(int),
52071 + .mode = 0600,
52072 + .proc_handler = &proc_dointvec,
52073 + },
52074 + {
52075 + .ctl_name = CTL_UNNUMBERED,
52076 + .procname = "socket_client_gid",
52077 + .data = &grsec_socket_client_gid,
52078 + .maxlen = sizeof(int),
52079 + .mode = 0600,
52080 + .proc_handler = &proc_dointvec,
52081 + },
52082 +#endif
52083 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52084 + {
52085 + .ctl_name = CTL_UNNUMBERED,
52086 + .procname = "socket_server",
52087 + .data = &grsec_enable_socket_server,
52088 + .maxlen = sizeof(int),
52089 + .mode = 0600,
52090 + .proc_handler = &proc_dointvec,
52091 + },
52092 + {
52093 + .ctl_name = CTL_UNNUMBERED,
52094 + .procname = "socket_server_gid",
52095 + .data = &grsec_socket_server_gid,
52096 + .maxlen = sizeof(int),
52097 + .mode = 0600,
52098 + .proc_handler = &proc_dointvec,
52099 + },
52100 +#endif
52101 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52102 + {
52103 + .ctl_name = CTL_UNNUMBERED,
52104 + .procname = "audit_group",
52105 + .data = &grsec_enable_group,
52106 + .maxlen = sizeof(int),
52107 + .mode = 0600,
52108 + .proc_handler = &proc_dointvec,
52109 + },
52110 + {
52111 + .ctl_name = CTL_UNNUMBERED,
52112 + .procname = "audit_gid",
52113 + .data = &grsec_audit_gid,
52114 + .maxlen = sizeof(int),
52115 + .mode = 0600,
52116 + .proc_handler = &proc_dointvec,
52117 + },
52118 +#endif
52119 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52120 + {
52121 + .ctl_name = CTL_UNNUMBERED,
52122 + .procname = "audit_chdir",
52123 + .data = &grsec_enable_chdir,
52124 + .maxlen = sizeof(int),
52125 + .mode = 0600,
52126 + .proc_handler = &proc_dointvec,
52127 + },
52128 +#endif
52129 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52130 + {
52131 + .ctl_name = CTL_UNNUMBERED,
52132 + .procname = "audit_mount",
52133 + .data = &grsec_enable_mount,
52134 + .maxlen = sizeof(int),
52135 + .mode = 0600,
52136 + .proc_handler = &proc_dointvec,
52137 + },
52138 +#endif
52139 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52140 + {
52141 + .ctl_name = CTL_UNNUMBERED,
52142 + .procname = "audit_textrel",
52143 + .data = &grsec_enable_audit_textrel,
52144 + .maxlen = sizeof(int),
52145 + .mode = 0600,
52146 + .proc_handler = &proc_dointvec,
52147 + },
52148 +#endif
52149 +#ifdef CONFIG_GRKERNSEC_DMESG
52150 + {
52151 + .ctl_name = CTL_UNNUMBERED,
52152 + .procname = "dmesg",
52153 + .data = &grsec_enable_dmesg,
52154 + .maxlen = sizeof(int),
52155 + .mode = 0600,
52156 + .proc_handler = &proc_dointvec,
52157 + },
52158 +#endif
52159 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52160 + {
52161 + .ctl_name = CTL_UNNUMBERED,
52162 + .procname = "chroot_findtask",
52163 + .data = &grsec_enable_chroot_findtask,
52164 + .maxlen = sizeof(int),
52165 + .mode = 0600,
52166 + .proc_handler = &proc_dointvec,
52167 + },
52168 +#endif
52169 +#ifdef CONFIG_GRKERNSEC_RESLOG
52170 + {
52171 + .ctl_name = CTL_UNNUMBERED,
52172 + .procname = "resource_logging",
52173 + .data = &grsec_resource_logging,
52174 + .maxlen = sizeof(int),
52175 + .mode = 0600,
52176 + .proc_handler = &proc_dointvec,
52177 + },
52178 +#endif
52179 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52180 + {
52181 + .ctl_name = CTL_UNNUMBERED,
52182 + .procname = "audit_ptrace",
52183 + .data = &grsec_enable_audit_ptrace,
52184 + .maxlen = sizeof(int),
52185 + .mode = 0600,
52186 + .proc_handler = &proc_dointvec,
52187 + },
52188 +#endif
52189 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52190 + {
52191 + .ctl_name = CTL_UNNUMBERED,
52192 + .procname = "harden_ptrace",
52193 + .data = &grsec_enable_harden_ptrace,
52194 + .maxlen = sizeof(int),
52195 + .mode = 0600,
52196 + .proc_handler = &proc_dointvec,
52197 + },
52198 +#endif
52199 + {
52200 + .ctl_name = CTL_UNNUMBERED,
52201 + .procname = "grsec_lock",
52202 + .data = &grsec_lock,
52203 + .maxlen = sizeof(int),
52204 + .mode = 0600,
52205 + .proc_handler = &proc_dointvec,
52206 + },
52207 +#endif
52208 +#ifdef CONFIG_GRKERNSEC_ROFS
52209 + {
52210 + .ctl_name = CTL_UNNUMBERED,
52211 + .procname = "romount_protect",
52212 + .data = &grsec_enable_rofs,
52213 + .maxlen = sizeof(int),
52214 + .mode = 0600,
52215 + .proc_handler = &proc_dointvec_minmax,
52216 + .extra1 = &one,
52217 + .extra2 = &one,
52218 + },
52219 +#endif
52220 + { .ctl_name = 0 }
52221 +};
52222 +#endif
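
[Editor's note: the registration of grsecurity_table is not shown in this hunk.
As a rough sketch only, a table like this could be attached under
/proc/sys/kernel on a 2.6.32-era kernel with register_sysctl_paths(); the path
and function names below are assumptions, not the patch's own wiring.]

	#include <linux/sysctl.h>

	static const struct ctl_path grsec_sysctl_path[] = {
		{ .procname = "kernel", },
		{ .procname = "grsecurity", },
		{ }
	};

	static struct ctl_table_header *grsec_sysctl_header;

	static int __init grsec_sysctl_init(void)
	{
		/* exposes the entries above as /proc/sys/kernel/grsecurity/<name> */
		grsec_sysctl_header = register_sysctl_paths(grsec_sysctl_path,
							    grsecurity_table);
		return grsec_sysctl_header ? 0 : -ENOMEM;
	}
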
52223 diff -urNp linux-2.6.32.41/grsecurity/grsec_time.c linux-2.6.32.41/grsecurity/grsec_time.c
52224 --- linux-2.6.32.41/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52225 +++ linux-2.6.32.41/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52226 @@ -0,0 +1,16 @@
52227 +#include <linux/kernel.h>
52228 +#include <linux/sched.h>
52229 +#include <linux/grinternal.h>
52230 +#include <linux/module.h>
52231 +
52232 +void
52233 +gr_log_timechange(void)
52234 +{
52235 +#ifdef CONFIG_GRKERNSEC_TIME
52236 + if (grsec_enable_time)
52237 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52238 +#endif
52239 + return;
52240 +}
52241 +
52242 +EXPORT_SYMBOL(gr_log_timechange);
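
[Editor's note: the call sites of gr_log_timechange() are elsewhere in the
patch and not shown here. A minimal sketch of the assumed usage, with a
hypothetical wrapper name:]

	#include <linux/time.h>
	#include <linux/grinternal.h>

	/* log only clock changes that actually took effect */
	static int example_set_time(struct timespec *ts)
	{
		int err = do_settimeofday(ts);

		if (!err)
			gr_log_timechange();
		return err;
	}
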
52243 diff -urNp linux-2.6.32.41/grsecurity/grsec_tpe.c linux-2.6.32.41/grsecurity/grsec_tpe.c
52244 --- linux-2.6.32.41/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52245 +++ linux-2.6.32.41/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52246 @@ -0,0 +1,39 @@
52247 +#include <linux/kernel.h>
52248 +#include <linux/sched.h>
52249 +#include <linux/file.h>
52250 +#include <linux/fs.h>
52251 +#include <linux/grinternal.h>
52252 +
52253 +extern int gr_acl_tpe_check(void);
52254 +
52255 +int
52256 +gr_tpe_allow(const struct file *file)
52257 +{
52258 +#ifdef CONFIG_GRKERNSEC
52259 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52260 + const struct cred *cred = current_cred();
52261 +
52262 + if (cred->uid && ((grsec_enable_tpe &&
52263 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52264 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52265 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52266 +#else
52267 + in_group_p(grsec_tpe_gid)
52268 +#endif
52269 + ) || gr_acl_tpe_check()) &&
52270 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52271 + (inode->i_mode & S_IWOTH))))) {
52272 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52273 + return 0;
52274 + }
52275 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52276 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52277 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52278 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52279 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52280 + return 0;
52281 + }
52282 +#endif
52283 +#endif
52284 + return 1;
52285 +}
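
[Editor's note: gr_tpe_allow() is wired into the exec path by other hunks of
the patch that are not shown here. A minimal sketch of the expected pattern,
with a hypothetical helper name:]

	#include <linux/fs.h>
	#include <linux/grsecurity.h>

	/* returning -EACCES here aborts the exec before the image is loaded;
	 * gr_tpe_allow() returns 1 to allow and 0 to deny */
	static int example_check_exec(struct file *file)
	{
		if (!gr_tpe_allow(file))
			return -EACCES;
		return 0;
	}
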
52286 diff -urNp linux-2.6.32.41/grsecurity/grsum.c linux-2.6.32.41/grsecurity/grsum.c
52287 --- linux-2.6.32.41/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52288 +++ linux-2.6.32.41/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52289 @@ -0,0 +1,61 @@
52290 +#include <linux/err.h>
52291 +#include <linux/kernel.h>
52292 +#include <linux/sched.h>
52293 +#include <linux/mm.h>
52294 +#include <linux/scatterlist.h>
52295 +#include <linux/crypto.h>
52296 +#include <linux/gracl.h>
52297 +
52298 +
52299 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52300 +#error "crypto and sha256 must be built into the kernel"
52301 +#endif
52302 +
52303 +int
52304 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52305 +{
52306 + char *p;
52307 + struct crypto_hash *tfm;
52308 + struct hash_desc desc;
52309 + struct scatterlist sg;
52310 + unsigned char temp_sum[GR_SHA_LEN];
52311 + volatile int retval = 0;
52312 + volatile int dummy = 0;
52313 + unsigned int i;
52314 +
52315 + sg_init_table(&sg, 1);
52316 +
52317 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52318 + if (IS_ERR(tfm)) {
52319 + /* should never happen, since sha256 should be built in */
52320 + return 1;
52321 + }
52322 +
52323 + desc.tfm = tfm;
52324 + desc.flags = 0;
52325 +
52326 + crypto_hash_init(&desc);
52327 +
52328 + p = salt;
52329 + sg_set_buf(&sg, p, GR_SALT_LEN);
52330 + crypto_hash_update(&desc, &sg, sg.length);
52331 +
52332 + p = entry->pw;
52333 + sg_set_buf(&sg, p, strlen(p));
52334 +
52335 + crypto_hash_update(&desc, &sg, sg.length);
52336 +
52337 + crypto_hash_final(&desc, temp_sum);
52338 +
52339 + memset(entry->pw, 0, GR_PW_LEN);
52340 +
52341 + for (i = 0; i < GR_SHA_LEN; i++)
52342 + if (sum[i] != temp_sum[i])
52343 + retval = 1;
52344 + else
52345 + dummy = 1; // waste a cycle
52346 +
52347 + crypto_free_hash(tfm);
52348 +
52349 + return retval;
52350 +}
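
[Editor's note: the comparison loop above walks every byte of both digests even
after a mismatch, so its timing does not depend on where the sums differ. The
same idea as a standalone helper, shown for illustration only (not part of the
patch):]

	/* returns nonzero on mismatch; no early exit, so timing does not reveal
	 * how many leading bytes matched */
	static int consttime_memneq(const unsigned char *a, const unsigned char *b,
				    unsigned int len)
	{
		unsigned char diff = 0;
		unsigned int i;

		for (i = 0; i < len; i++)
			diff |= a[i] ^ b[i];

		return diff != 0;
	}
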
52351 diff -urNp linux-2.6.32.41/grsecurity/Kconfig linux-2.6.32.41/grsecurity/Kconfig
52352 --- linux-2.6.32.41/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52353 +++ linux-2.6.32.41/grsecurity/Kconfig 2011-04-17 15:56:46.000000000 -0400
52354 @@ -0,0 +1,1045 @@
52355 +#
52356 +# grsecurity configuration
52357 +#
52358 +
52359 +menu "Grsecurity"
52360 +
52361 +config GRKERNSEC
52362 + bool "Grsecurity"
52363 + select CRYPTO
52364 + select CRYPTO_SHA256
52365 + help
52366 + If you say Y here, you will be able to configure many features
52367 + that will enhance the security of your system. It is highly
52368 + recommended that you say Y here and read through the help
52369 + for each option so that you fully understand the features and
52370 + can evaluate their usefulness for your machine.
52371 +
52372 +choice
52373 + prompt "Security Level"
52374 + depends on GRKERNSEC
52375 + default GRKERNSEC_CUSTOM
52376 +
52377 +config GRKERNSEC_LOW
52378 + bool "Low"
52379 + select GRKERNSEC_LINK
52380 + select GRKERNSEC_FIFO
52381 + select GRKERNSEC_EXECVE
52382 + select GRKERNSEC_RANDNET
52383 + select GRKERNSEC_DMESG
52384 + select GRKERNSEC_CHROOT
52385 + select GRKERNSEC_CHROOT_CHDIR
52386 +
52387 + help
52388 + If you choose this option, several of the grsecurity options will
52389 + be enabled that will give you greater protection against a number
52390 + of attacks, while ensuring that none of your software will have any
52391 + conflicts with the additional security measures. If you run a lot
52392 + of unusual software, or you are having problems with the higher
52393 + security levels, you should say Y here. With this option, the
52394 + following features are enabled:
52395 +
52396 + - Linking restrictions
52397 + - FIFO restrictions
52398 + - Enforcing RLIMIT_NPROC on execve
52399 + - Restricted dmesg
52400 + - Enforced chdir("/") on chroot
52401 + - Runtime module disabling
52402 +
52403 +config GRKERNSEC_MEDIUM
52404 + bool "Medium"
52405 + select PAX
52406 + select PAX_EI_PAX
52407 + select PAX_PT_PAX_FLAGS
52408 + select PAX_HAVE_ACL_FLAGS
52409 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52410 + select GRKERNSEC_CHROOT
52411 + select GRKERNSEC_CHROOT_SYSCTL
52412 + select GRKERNSEC_LINK
52413 + select GRKERNSEC_FIFO
52414 + select GRKERNSEC_EXECVE
52415 + select GRKERNSEC_DMESG
52416 + select GRKERNSEC_RANDNET
52417 + select GRKERNSEC_FORKFAIL
52418 + select GRKERNSEC_TIME
52419 + select GRKERNSEC_SIGNAL
52420 + select GRKERNSEC_CHROOT
52421 + select GRKERNSEC_CHROOT_UNIX
52422 + select GRKERNSEC_CHROOT_MOUNT
52423 + select GRKERNSEC_CHROOT_PIVOT
52424 + select GRKERNSEC_CHROOT_DOUBLE
52425 + select GRKERNSEC_CHROOT_CHDIR
52426 + select GRKERNSEC_CHROOT_MKNOD
52427 + select GRKERNSEC_PROC
52428 + select GRKERNSEC_PROC_USERGROUP
52429 + select PAX_RANDUSTACK
52430 + select PAX_ASLR
52431 + select PAX_RANDMMAP
52432 + select PAX_REFCOUNT if (X86 || SPARC64)
52433 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52434 +
52435 + help
52436 + If you say Y here, several features in addition to those included
52437 + in the low additional security level will be enabled. These
52438 + features provide even more security to your system, though in rare
52439 + cases they may be incompatible with very old or poorly written
52440 + software. If you enable this option, make sure that your auth
52441 + service (identd) is running as gid 1001. With this option,
52442 + the following features (in addition to those provided in the
52443 + low additional security level) will be enabled:
52444 +
52445 + - Failed fork logging
52446 + - Time change logging
52447 + - Signal logging
52448 + - Deny mounts in chroot
52449 + - Deny double chrooting
52450 + - Deny sysctl writes in chroot
52451 + - Deny mknod in chroot
52452 + - Deny access to abstract AF_UNIX sockets out of chroot
52453 + - Deny pivot_root in chroot
52454 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52455 + - /proc restrictions with special GID set to 10 (usually wheel)
52456 + - Address Space Layout Randomization (ASLR)
52457 + - Prevent exploitation of most refcount overflows
52458 + - Bounds checking of copying between the kernel and userland
52459 +
52460 +config GRKERNSEC_HIGH
52461 + bool "High"
52462 + select GRKERNSEC_LINK
52463 + select GRKERNSEC_FIFO
52464 + select GRKERNSEC_EXECVE
52465 + select GRKERNSEC_DMESG
52466 + select GRKERNSEC_FORKFAIL
52467 + select GRKERNSEC_TIME
52468 + select GRKERNSEC_SIGNAL
52469 + select GRKERNSEC_CHROOT
52470 + select GRKERNSEC_CHROOT_SHMAT
52471 + select GRKERNSEC_CHROOT_UNIX
52472 + select GRKERNSEC_CHROOT_MOUNT
52473 + select GRKERNSEC_CHROOT_FCHDIR
52474 + select GRKERNSEC_CHROOT_PIVOT
52475 + select GRKERNSEC_CHROOT_DOUBLE
52476 + select GRKERNSEC_CHROOT_CHDIR
52477 + select GRKERNSEC_CHROOT_MKNOD
52478 + select GRKERNSEC_CHROOT_CAPS
52479 + select GRKERNSEC_CHROOT_SYSCTL
52480 + select GRKERNSEC_CHROOT_FINDTASK
52481 + select GRKERNSEC_SYSFS_RESTRICT
52482 + select GRKERNSEC_PROC
52483 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52484 + select GRKERNSEC_HIDESYM
52485 + select GRKERNSEC_BRUTE
52486 + select GRKERNSEC_PROC_USERGROUP
52487 + select GRKERNSEC_KMEM
52488 + select GRKERNSEC_RESLOG
52489 + select GRKERNSEC_RANDNET
52490 + select GRKERNSEC_PROC_ADD
52491 + select GRKERNSEC_CHROOT_CHMOD
52492 + select GRKERNSEC_CHROOT_NICE
52493 + select GRKERNSEC_AUDIT_MOUNT
52494 + select GRKERNSEC_MODHARDEN if (MODULES)
52495 + select GRKERNSEC_HARDEN_PTRACE
52496 + select GRKERNSEC_VM86 if (X86_32)
52497 + select GRKERNSEC_KERN_LOCKOUT if (X86)
52498 + select PAX
52499 + select PAX_RANDUSTACK
52500 + select PAX_ASLR
52501 + select PAX_RANDMMAP
52502 + select PAX_NOEXEC
52503 + select PAX_MPROTECT
52504 + select PAX_EI_PAX
52505 + select PAX_PT_PAX_FLAGS
52506 + select PAX_HAVE_ACL_FLAGS
52507 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52508 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52509 + select PAX_RANDKSTACK if (X86_TSC && X86)
52510 + select PAX_SEGMEXEC if (X86_32)
52511 + select PAX_PAGEEXEC
52512 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52513 + select PAX_EMUTRAMP if (PARISC)
52514 + select PAX_EMUSIGRT if (PARISC)
52515 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52516 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52517 + select PAX_REFCOUNT if (X86 || SPARC64)
52518 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52519 + help
52520 + If you say Y here, many of the features of grsecurity will be
52521 + enabled, which will protect you against many kinds of attacks
52522 + against your system. The heightened security comes at a cost
52523 + of an increased chance of incompatibilities with rare software
52524 + on your machine. Since this security level enables PaX, you should
52525 + view <http://pax.grsecurity.net> and read about the PaX
52526 + project. While you are there, download chpax and run it on
52527 + binaries that cause problems with PaX. Also remember that
52528 + since the /proc restrictions are enabled, you must run your
52529 + identd as gid 1001. This security level enables the following
52530 + features in addition to those listed in the low and medium
52531 + security levels:
52532 +
52533 + - Additional /proc restrictions
52534 + - Chmod restrictions in chroot
52535 + - No signals, ptrace, or viewing of processes outside of chroot
52536 + - Capability restrictions in chroot
52537 + - Deny fchdir out of chroot
52538 + - Priority restrictions in chroot
52539 + - Segmentation-based implementation of PaX
52540 + - Mprotect restrictions
52541 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52542 + - Kernel stack randomization
52543 + - Mount/unmount/remount logging
52544 + - Kernel symbol hiding
52545 + - Prevention of memory exhaustion-based exploits
52546 + - Hardening of module auto-loading
52547 + - Ptrace restrictions
52548 + - Restricted vm86 mode
52549 + - Restricted sysfs/debugfs
52550 + - Active kernel exploit response
52551 +
52552 +config GRKERNSEC_CUSTOM
52553 + bool "Custom"
52554 + help
52555 + If you say Y here, you will be able to configure every grsecurity
52556 + option, which allows you to enable many more features that aren't
52557 + covered in the basic security levels. These additional features
52558 + include TPE, socket restrictions, and the sysctl system for
52559 + grsecurity. It is advised that you read through the help for
52560 + each option to determine its usefulness in your situation.
52561 +
52562 +endchoice
52563 +
52564 +menu "Address Space Protection"
52565 +depends on GRKERNSEC
52566 +
52567 +config GRKERNSEC_KMEM
52568 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52569 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52570 + help
52571 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52572 + be written to via mmap or otherwise to modify the running kernel.
52573 + /dev/port will also not be allowed to be opened. If you have module
52574 + support disabled, enabling this will close up four ways that are
52575 + currently used to insert malicious code into the running kernel.
52576 + Even with all these features enabled, we still highly recommend that
52577 + you use the RBAC system, as it is still possible for an attacker to
52578 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52579 + If you are not using XFree86, you may be able to stop this additional
52580 + case by enabling the 'Disable privileged I/O' option. Though nothing
52581 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52582 + but only to video memory, which is the only writing we allow in this
52583 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
52584 + cannot later be mprotected to add PROT_WRITE.
52585 + It is highly recommended that you say Y here if you meet all the
52586 + conditions above.
52587 +
52588 +config GRKERNSEC_VM86
52589 + bool "Restrict VM86 mode"
52590 + depends on X86_32
52591 +
52592 + help
52593 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52594 + make use of a special execution mode on 32bit x86 processors called
52595 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52596 + video cards and will still work with this option enabled. The purpose
52597 + of the option is to prevent exploitation of emulation errors in
52598 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52599 + Nearly all users should be able to enable this option.
52600 +
52601 +config GRKERNSEC_IO
52602 + bool "Disable privileged I/O"
52603 + depends on X86
52604 + select RTC_CLASS
52605 + select RTC_INTF_DEV
52606 + select RTC_DRV_CMOS
52607 +
52608 + help
52609 + If you say Y here, all ioperm and iopl calls will return an error.
52610 + Ioperm and iopl can be used to modify the running kernel.
52611 + Unfortunately, some programs need this access to operate properly,
52612 + the most notable of which are XFree86 and hwclock. The hwclock case can be
52613 + remedied by having RTC support in the kernel, so real-time
52614 + clock support is enabled if this option is enabled, to ensure
52615 + that hwclock operates correctly. XFree86 still will not
52616 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52617 + IF YOU USE XFree86. If you use XFree86 and you still want to
52618 + protect your kernel against modification, use the RBAC system.
52619 +
52620 +config GRKERNSEC_PROC_MEMMAP
52621 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52622 + default y if (PAX_NOEXEC || PAX_ASLR)
52623 + depends on PAX_NOEXEC || PAX_ASLR
52624 + help
52625 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52626 + give no information about the addresses of a task's mappings if
52627 + PaX features that rely on random addresses are enabled on the task.
52628 + If you use PaX, it is strongly recommended that you say Y here, as it
52629 + closes up a hole that makes the full ASLR useless for suid
52630 + binaries.
52631 +
52632 +config GRKERNSEC_BRUTE
52633 + bool "Deter exploit bruteforcing"
52634 + help
52635 + If you say Y here, attempts to bruteforce exploits against forking
52636 + daemons such as apache or sshd, as well as against suid/sgid binaries
52637 + will be deterred. When a child of a forking daemon is killed by PaX
52638 + or crashes due to an illegal instruction or other suspicious signal,
52639 + the parent process will be delayed 30 seconds upon every subsequent
52640 + fork until the administrator is able to assess the situation and
52641 + restart the daemon.
52642 + In the suid/sgid case, the attempt is logged, the user has all their
52643 + processes terminated, and they are prevented from executing any further
52644 + processes for 15 minutes.
52645 + It is recommended that you also enable signal logging in the auditing
52646 + section so that logs are generated when a process triggers a suspicious
52647 + signal.
52648 +
52649 +config GRKERNSEC_MODHARDEN
52650 + bool "Harden module auto-loading"
52651 + depends on MODULES
52652 + help
52653 + If you say Y here, module auto-loading in response to use of some
52654 + feature implemented by an unloaded module will be restricted to
52655 + root users. Enabling this option helps defend against attacks
52656 + by unprivileged users who abuse the auto-loading behavior to
52657 + cause a vulnerable module to load that is then exploited.
52658 +
52659 + If this option prevents a legitimate use of auto-loading for a
52660 + non-root user, the administrator can execute modprobe manually
52661 + with the exact name of the module mentioned in the alert log.
52662 + Alternatively, the administrator can add the module to the list
52663 + of modules loaded at boot by modifying init scripts.
52664 +
52665 + Modification of init scripts will most likely be needed on
52666 + Ubuntu servers with encrypted home directory support enabled,
52667 + as the first non-root user logging in will cause the ecb(aes),
52668 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52669 +
52670 +config GRKERNSEC_HIDESYM
52671 + bool "Hide kernel symbols"
52672 + help
52673 + If you say Y here, getting information on loaded modules, and
52674 + displaying all kernel symbols through a syscall will be restricted
52675 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52676 + /proc/kallsyms will be restricted to the root user. The RBAC
52677 + system can hide that entry even from root.
52678 +
52679 + This option also prevents leaking of kernel addresses through
52680 + several /proc entries.
52681 +
52682 + Note that this option is only effective provided the following
52683 + conditions are met:
52684 + 1) The kernel using grsecurity is not precompiled by some distribution
52685 + 2) You have also enabled GRKERNSEC_DMESG
52686 + 3) You are using the RBAC system and hiding other files such as your
52687 + kernel image and System.map. Alternatively, enabling this option
52688 + causes the permissions on /boot, /lib/modules, and the kernel
52689 + source directory to change at compile time to prevent
52690 + reading by non-root users.
52691 + If the above conditions are met, this option will aid in providing a
52692 + useful protection against local kernel exploitation of overflows
52693 + and arbitrary read/write vulnerabilities.
52694 +
52695 +config GRKERNSEC_KERN_LOCKOUT
52696 + bool "Active kernel exploit response"
52697 + depends on X86
52698 + help
52699 + If you say Y here, when a PaX alert is triggered due to suspicious
52700 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52701 + or an oops occurs due to bad memory accesses, instead of just
52702 + terminating the offending process (and potentially allowing
52703 + a subsequent exploit from the same user), we will take one of two
52704 + actions:
52705 + If the user was root, we will panic the system
52706 + If the user was non-root, we will log the attempt, terminate
52707 + all processes owned by the user, then prevent them from creating
52708 + any new processes until the system is restarted
52709 + This deters repeated kernel exploitation/bruteforcing attempts
52710 + and is useful for later forensics.
52711 +
52712 +endmenu
52713 +menu "Role Based Access Control Options"
52714 +depends on GRKERNSEC
52715 +
52716 +config GRKERNSEC_RBAC_DEBUG
52717 + bool
52718 +
52719 +config GRKERNSEC_NO_RBAC
52720 + bool "Disable RBAC system"
52721 + help
52722 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52723 + preventing the RBAC system from being enabled. You should only say Y
52724 + here if you have no intention of using the RBAC system, so as to prevent
52725 + an attacker with root access from misusing the RBAC system to hide files
52726 + and processes when loadable module support and /dev/[k]mem have been
52727 + locked down.
52728 +
52729 +config GRKERNSEC_ACL_HIDEKERN
52730 + bool "Hide kernel processes"
52731 + help
52732 + If you say Y here, all kernel threads will be hidden from all
52733 + processes but those whose subject has the "view hidden processes"
52734 + flag.
52735 +
52736 +config GRKERNSEC_ACL_MAXTRIES
52737 + int "Maximum tries before password lockout"
52738 + default 3
52739 + help
52740 + This option enforces the maximum number of times a user can attempt
52741 + to authorize themselves with the grsecurity RBAC system before being
52742 + denied the ability to attempt authorization again for a specified time.
52743 + The lower the number, the harder it will be to brute-force a password.
52744 +
52745 +config GRKERNSEC_ACL_TIMEOUT
52746 + int "Time to wait after max password tries, in seconds"
52747 + default 30
52748 + help
52749 + This option specifies the time the user must wait after attempting to
52750 + authorize to the RBAC system with the maximum number of invalid
52751 + passwords. The higher the number, the harder it will be to brute-force
52752 + a password.
52753 +
52754 +endmenu
52755 +menu "Filesystem Protections"
52756 +depends on GRKERNSEC
52757 +
52758 +config GRKERNSEC_PROC
52759 + bool "Proc restrictions"
52760 + help
52761 + If you say Y here, the permissions of the /proc filesystem
52762 + will be altered to enhance system security and privacy. You MUST
52763 + choose either a user-only restriction or a user-and-group restriction.
52764 + With the user-only restriction, users can see only the processes they
52765 + themselves run. With the group restriction, members of a group you
52766 + choose can additionally view all processes and files normally restricted
52767 + to root. NOTE: If you're running identd as
52768 + a non-root user, you will have to run it as the group you specify here.
52769 +
52770 +config GRKERNSEC_PROC_USER
52771 + bool "Restrict /proc to user only"
52772 + depends on GRKERNSEC_PROC
52773 + help
52774 + If you say Y here, non-root users will only be able to view their own
52775 + processes, and will be restricted from viewing network-related
52776 + information and from viewing kernel symbol and module information.
52777 +
52778 +config GRKERNSEC_PROC_USERGROUP
52779 + bool "Allow special group"
52780 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52781 + help
52782 + If you say Y here, you will be able to select a group that will be
52783 + able to view all processes and network-related information. If you've
52784 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52785 + remain hidden. This option is useful if you want to run identd as
52786 + a non-root user.
52787 +
52788 +config GRKERNSEC_PROC_GID
52789 + int "GID for special group"
52790 + depends on GRKERNSEC_PROC_USERGROUP
52791 + default 1001
52792 +
52793 +config GRKERNSEC_PROC_ADD
52794 + bool "Additional restrictions"
52795 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52796 + help
52797 + If you say Y here, additional restrictions will be placed on
52798 + /proc that keep normal users from viewing device information and
52799 + slabinfo information that could be useful for exploits.
52800 +
52801 +config GRKERNSEC_LINK
52802 + bool "Linking restrictions"
52803 + help
52804 + If you say Y here, /tmp race exploits will be prevented, since users
52805 + will no longer be able to follow symlinks owned by other users in
52806 + world-writable +t directories (e.g. /tmp), unless the owner of the
52807 + symlink is the owner of the directory. Users will also not be
52808 + able to hardlink to files they do not own. If the sysctl option is
52809 + enabled, a sysctl option with name "linking_restrictions" is created.
52810 +
52811 +config GRKERNSEC_FIFO
52812 + bool "FIFO restrictions"
52813 + help
52814 + If you say Y here, users will not be able to write to FIFOs they don't
52815 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52816 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
52817 + option is enabled, a sysctl option with name "fifo_restrictions" is
52818 + created.
52819 +
52820 +config GRKERNSEC_SYSFS_RESTRICT
52821 + bool "Sysfs/debugfs restriction"
52822 + depends on SYSFS
52823 + help
52824 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52825 + any filesystem normally mounted under it (e.g. debugfs) will only
52826 + be accessible by root. These filesystems generally provide access
52827 + to hardware and debug information that isn't appropriate for unprivileged
52828 + users of the system. Sysfs and debugfs have also become a large source
52829 + of new vulnerabilities, ranging from infoleaks to local compromise.
52830 + There has been very little oversight with an eye toward security involved
52831 + in adding new exporters of information to these filesystems, so their
52832 + use is discouraged.
52833 + This option is equivalent to a chmod 0700 of the mount paths.
52834 +
52835 +config GRKERNSEC_ROFS
52836 + bool "Runtime read-only mount protection"
52837 + help
52838 + If you say Y here, a sysctl option with name "romount_protect" will
52839 + be created. By setting this option to 1 at runtime, filesystems
52840 + will be protected in the following ways:
52841 + * No new writable mounts will be allowed
52842 + * Existing read-only mounts won't be able to be remounted read/write
52843 + * Write operations will be denied on all block devices
52844 + This option acts independently of grsec_lock: once it is set to 1,
52845 + it cannot be turned off. Therefore, please be mindful of the resulting
52846 + behavior if this option is enabled in an init script on a read-only
52847 + filesystem. This feature is mainly intended for secure embedded systems.
52848 +
52849 +config GRKERNSEC_CHROOT
52850 + bool "Chroot jail restrictions"
52851 + help
52852 + If you say Y here, you will be able to choose several options that will
52853 + make breaking out of a chrooted jail much more difficult. If you
52854 + encounter no software incompatibilities with the following options, it
52855 + is recommended that you enable each one.
52856 +
52857 +config GRKERNSEC_CHROOT_MOUNT
52858 + bool "Deny mounts"
52859 + depends on GRKERNSEC_CHROOT
52860 + help
52861 + If you say Y here, processes inside a chroot will not be able to
52862 + mount or remount filesystems. If the sysctl option is enabled, a
52863 + sysctl option with name "chroot_deny_mount" is created.
52864 +
52865 +config GRKERNSEC_CHROOT_DOUBLE
52866 + bool "Deny double-chroots"
52867 + depends on GRKERNSEC_CHROOT
52868 + help
52869 + If you say Y here, processes inside a chroot will not be able to chroot
52870 + again outside the chroot. This is a widely used method of breaking
52871 + out of a chroot jail and should not be allowed. If the sysctl
52872 + option is enabled, a sysctl option with name
52873 + "chroot_deny_chroot" is created.
52874 +
52875 +config GRKERNSEC_CHROOT_PIVOT
52876 + bool "Deny pivot_root in chroot"
52877 + depends on GRKERNSEC_CHROOT
52878 + help
52879 + If you say Y here, processes inside a chroot will not be able to use
52880 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52881 + works similarly to chroot in that it changes the root filesystem. This
52882 + function could be misused in a chrooted process to attempt to break out
52883 + of the chroot, and therefore should not be allowed. If the sysctl
52884 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52885 + created.
52886 +
52887 +config GRKERNSEC_CHROOT_CHDIR
52888 + bool "Enforce chdir(\"/\") on all chroots"
52889 + depends on GRKERNSEC_CHROOT
52890 + help
52891 + If you say Y here, the current working directory of all newly-chrooted
52892 + applications will be set to the root directory of the chroot.
52893 + The man page on chroot(2) states:
52894 + Note that this call does not change the current working
52895 + directory, so that `.' can be outside the tree rooted at
52896 + `/'. In particular, the super-user can escape from a
52897 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52898 +
52899 + It is recommended that you say Y here, since it's not known to break
52900 + any software. If the sysctl option is enabled, a sysctl option with
52901 + name "chroot_enforce_chdir" is created.
52902 +
52903 +config GRKERNSEC_CHROOT_CHMOD
52904 + bool "Deny (f)chmod +s"
52905 + depends on GRKERNSEC_CHROOT
52906 + help
52907 + If you say Y here, processes inside a chroot will not be able to chmod
52908 + or fchmod files to make them have suid or sgid bits. This protects
52909 + against another published method of breaking a chroot. If the sysctl
52910 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
52911 + created.
52912 +
52913 +config GRKERNSEC_CHROOT_FCHDIR
52914 + bool "Deny fchdir out of chroot"
52915 + depends on GRKERNSEC_CHROOT
52916 + help
52917 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
52918 + to a file descriptor of the chrooting process that points to a directory
52919 + outside of the chroot will be stopped. If the sysctl option
52920 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52921 +
52922 +config GRKERNSEC_CHROOT_MKNOD
52923 + bool "Deny mknod"
52924 + depends on GRKERNSEC_CHROOT
52925 + help
52926 + If you say Y here, processes inside a chroot will not be allowed to
52927 + mknod. The problem with using mknod inside a chroot is that it
52928 + would allow an attacker to create a device entry that is the same
52929 + as one on the physical root of your system, which could be anything
52930 + from the console device to a device for your hard drive (which
52931 + they could then use to wipe the drive or steal data). It is recommended
52932 + that you say Y here, unless you run into software incompatibilities.
52933 + If the sysctl option is enabled, a sysctl option with name
52934 + "chroot_deny_mknod" is created.
52935 +
52936 +config GRKERNSEC_CHROOT_SHMAT
52937 + bool "Deny shmat() out of chroot"
52938 + depends on GRKERNSEC_CHROOT
52939 + help
52940 + If you say Y here, processes inside a chroot will not be able to attach
52941 + to shared memory segments that were created outside of the chroot jail.
52942 + It is recommended that you say Y here. If the sysctl option is enabled,
52943 + a sysctl option with name "chroot_deny_shmat" is created.
52944 +
52945 +config GRKERNSEC_CHROOT_UNIX
52946 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
52947 + depends on GRKERNSEC_CHROOT
52948 + help
52949 + If you say Y here, processes inside a chroot will not be able to
52950 + connect to abstract (meaning not belonging to a filesystem) Unix
52951 + domain sockets that were bound outside of a chroot. It is recommended
52952 + that you say Y here. If the sysctl option is enabled, a sysctl option
52953 + with name "chroot_deny_unix" is created.
52954 +
52955 +config GRKERNSEC_CHROOT_FINDTASK
52956 + bool "Protect outside processes"
52957 + depends on GRKERNSEC_CHROOT
52958 + help
52959 + If you say Y here, processes inside a chroot will not be able to
52960 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52961 + getsid, or view any process outside of the chroot. If the sysctl
52962 + option is enabled, a sysctl option with name "chroot_findtask" is
52963 + created.
52964 +
52965 +config GRKERNSEC_CHROOT_NICE
52966 + bool "Restrict priority changes"
52967 + depends on GRKERNSEC_CHROOT
52968 + help
52969 + If you say Y here, processes inside a chroot will not be able to raise
52970 + the priority of processes in the chroot, or alter the priority of
52971 + processes outside the chroot. This provides more security than simply
52972 + removing CAP_SYS_NICE from the process' capability set. If the
52973 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52974 + is created.
52975 +
52976 +config GRKERNSEC_CHROOT_SYSCTL
52977 + bool "Deny sysctl writes"
52978 + depends on GRKERNSEC_CHROOT
52979 + help
52980 + If you say Y here, an attacker in a chroot will not be able to
52981 + write to sysctl entries, either by sysctl(2) or through a /proc
52982 + interface. It is strongly recommended that you say Y here. If the
52983 + sysctl option is enabled, a sysctl option with name
52984 + "chroot_deny_sysctl" is created.
52985 +
52986 +config GRKERNSEC_CHROOT_CAPS
52987 + bool "Capability restrictions"
52988 + depends on GRKERNSEC_CHROOT
52989 + help
52990 + If you say Y here, the capabilities on all root processes within a
52991 + chroot jail will be lowered to stop module insertion, raw i/o,
52992 + system and net admin tasks, rebooting the system, modifying immutable
52993 + files, modifying IPC owned by another, and changing the system time.
52994 + This is left as an option because it can break some apps. Disable this
52995 + if your chrooted apps are having problems performing those kinds of
52996 + tasks. If the sysctl option is enabled, a sysctl option with
52997 + name "chroot_caps" is created.
52998 +
52999 +endmenu
53000 +menu "Kernel Auditing"
53001 +depends on GRKERNSEC
53002 +
53003 +config GRKERNSEC_AUDIT_GROUP
53004 + bool "Single group for auditing"
53005 + help
53006 + If you say Y here, the exec, chdir, and (un)mount logging features
53007 + will only operate on a group you specify. This option is recommended
53008 + if you only want to watch certain users instead of having a large
53009 + amount of logs from the entire system. If the sysctl option is enabled,
53010 + a sysctl option with name "audit_group" is created.
53011 +
53012 +config GRKERNSEC_AUDIT_GID
53013 + int "GID for auditing"
53014 + depends on GRKERNSEC_AUDIT_GROUP
53015 + default 1007
53016 +
53017 +config GRKERNSEC_EXECLOG
53018 + bool "Exec logging"
53019 + help
53020 + If you say Y here, all execve() calls will be logged (since the
53021 + other exec*() calls are frontends to execve(), all execution
53022 + will be logged). Useful for shell-servers that like to keep track
53023 + of their users. If the sysctl option is enabled, a sysctl option with
53024 + name "exec_logging" is created.
53025 + WARNING: This option when enabled will produce a LOT of logs, especially
53026 + on an active system.
53027 +
53028 +config GRKERNSEC_RESLOG
53029 + bool "Resource logging"
53030 + help
53031 + If you say Y here, all attempts to overstep resource limits will
53032 + be logged with the resource name, the requested size, and the current
53033 + limit. It is highly recommended that you say Y here. If the sysctl
53034 + option is enabled, a sysctl option with name "resource_logging" is
53035 + created. If the RBAC system is enabled, the sysctl value is ignored.
53036 +
53037 +config GRKERNSEC_CHROOT_EXECLOG
53038 + bool "Log execs within chroot"
53039 + help
53040 + If you say Y here, all executions inside a chroot jail will be logged
53041 + to syslog. This can produce a large volume of logs if certain
53042 + applications (e.g. djb's daemontools) are installed on the system, and
53043 + is therefore left as an option. If the sysctl option is enabled, a
53044 + sysctl option with name "chroot_execlog" is created.
53045 +
53046 +config GRKERNSEC_AUDIT_PTRACE
53047 + bool "Ptrace logging"
53048 + help
53049 + If you say Y here, all attempts to attach to a process via ptrace
53050 + will be logged. If the sysctl option is enabled, a sysctl option
53051 + with name "audit_ptrace" is created.
53052 +
53053 +config GRKERNSEC_AUDIT_CHDIR
53054 + bool "Chdir logging"
53055 + help
53056 + If you say Y here, all chdir() calls will be logged. If the sysctl
53057 + option is enabled, a sysctl option with name "audit_chdir" is created.
53058 +
53059 +config GRKERNSEC_AUDIT_MOUNT
53060 + bool "(Un)Mount logging"
53061 + help
53062 + If you say Y here, all mounts and unmounts will be logged. If the
53063 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53064 + created.
53065 +
53066 +config GRKERNSEC_SIGNAL
53067 + bool "Signal logging"
53068 + help
53069 + If you say Y here, certain important signals will be logged, such as
53070 + SIGSEGV, which will as a result inform you when an error in a program
53071 + occurred, which in some cases could indicate a possible exploit attempt.
53072 + If the sysctl option is enabled, a sysctl option with name
53073 + "signal_logging" is created.
53074 +
53075 +config GRKERNSEC_FORKFAIL
53076 + bool "Fork failure logging"
53077 + help
53078 + If you say Y here, all failed fork() attempts will be logged.
53079 + This could suggest a fork bomb, or someone attempting to overstep
53080 + their process limit. If the sysctl option is enabled, a sysctl option
53081 + with name "forkfail_logging" is created.
53082 +
53083 +config GRKERNSEC_TIME
53084 + bool "Time change logging"
53085 + help
53086 + If you say Y here, any changes of the system clock will be logged.
53087 + If the sysctl option is enabled, a sysctl option with name
53088 + "timechange_logging" is created.
53089 +
53090 +config GRKERNSEC_PROC_IPADDR
53091 + bool "/proc/<pid>/ipaddr support"
53092 + help
53093 + If you say Y here, a new entry will be added to each /proc/<pid>
53094 + directory that contains the IP address of the person using the task.
53095 + The IP is carried across local TCP and AF_UNIX stream sockets.
53096 + This information can be useful for IDS/IPSes to perform remote response
53097 + to a local attack. The entry is readable by only the owner of the
53098 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53099 + the RBAC system), and thus does not create privacy concerns.
53100 +
53101 +config GRKERNSEC_RWXMAP_LOG
53102 + bool 'Denied RWX mmap/mprotect logging'
53103 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53104 + help
53105 + If you say Y here, calls to mmap() and mprotect() with explicit
53106 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53107 + denied by the PAX_MPROTECT feature. If the sysctl option is
53108 + enabled, a sysctl option with name "rwxmap_logging" is created.
53109 +
53110 +config GRKERNSEC_AUDIT_TEXTREL
53111 + bool 'ELF text relocations logging (READ HELP)'
53112 + depends on PAX_MPROTECT
53113 + help
53114 + If you say Y here, text relocations will be logged with the filename
53115 + of the offending library or binary. The purpose of the feature is
53116 + to help Linux distribution developers get rid of libraries and
53117 + binaries that need text relocations which hinder the future progress
53118 + of PaX. Only Linux distribution developers should say Y here, and
53119 + never on a production machine, as this option creates an information
53120 + leak that could aid an attacker in defeating the randomization of
53121 + a single memory region. If the sysctl option is enabled, a sysctl
53122 + option with name "audit_textrel" is created.
53123 +
53124 +endmenu
53125 +
53126 +menu "Executable Protections"
53127 +depends on GRKERNSEC
53128 +
53129 +config GRKERNSEC_EXECVE
53130 + bool "Enforce RLIMIT_NPROC on execs"
53131 + help
53132 + If you say Y here, users with a resource limit on processes will
53133 + have the value checked during execve() calls. The current system
53134 + only checks the system limit during fork() calls. If the sysctl option
53135 + is enabled, a sysctl option with name "execve_limiting" is created.
53136 +
53137 +config GRKERNSEC_DMESG
53138 + bool "Dmesg(8) restriction"
53139 + help
53140 + If you say Y here, non-root users will not be able to use dmesg(8)
53141 + to view up to the last 4kb of messages in the kernel's log buffer.
53142 + The kernel's log buffer often contains kernel addresses and other
53143 + identifying information useful to an attacker in fingerprinting a
53144 + system for a targeted exploit.
53145 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53146 + created.
53147 +
53148 +config GRKERNSEC_HARDEN_PTRACE
53149 + bool "Deter ptrace-based process snooping"
53150 + help
53151 + If you say Y here, TTY sniffers and other malicious monitoring
53152 + programs implemented through ptrace will be defeated. If you
53153 + have been using the RBAC system, this option has already been
53154 + enabled for several years for all users, with the ability to make
53155 + fine-grained exceptions.
53156 +
53157 + This option only affects the ability of non-root users to ptrace
53158 + processes that are not a descendant of the ptracing process.
53159 + This means that strace ./binary and gdb ./binary will still work,
53160 + but attaching to arbitrary processes will not. If the sysctl
53161 + option is enabled, a sysctl option with name "harden_ptrace" is
53162 + created.
53163 +
53164 +config GRKERNSEC_TPE
53165 + bool "Trusted Path Execution (TPE)"
53166 + help
53167 + If you say Y here, you will be able to choose a GID to add to the
53168 + supplementary groups of users you want to mark as "untrusted."
53169 + These users will not be able to execute any files that are not in
53170 + root-owned directories writable only by root. If the sysctl option
53171 + is enabled, a sysctl option with name "tpe" is created.
53172 +
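As a rough illustration of the trust rule described above (not the actual grsecurity implementation), the check a directory must pass under base TPE can be sketched in userspace C; the helper name below is hypothetical:

    /* Sketch: approximate the base TPE trust rule for a directory --
     * trusted means root-owned and not writable by group or others.
     * Illustration only, not the kernel's real check. */
    #include <sys/stat.h>
    #include <stdio.h>

    static int tpe_dir_trusted(const char *dir)
    {
        struct stat st;

        if (stat(dir, &st) != 0)
            return 0;
        return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
    }

    int main(int argc, char **argv)
    {
        const char *dir = argc > 1 ? argv[1] : "/usr/bin";

        printf("%s: %s\n", dir, tpe_dir_trusted(dir) ? "trusted" : "untrusted");
        return 0;
    }

An "untrusted" user may only execute files from directories that pass this kind of check.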
53173 +config GRKERNSEC_TPE_ALL
53174 + bool "Partially restrict all non-root users"
53175 + depends on GRKERNSEC_TPE
53176 + help
53177 + If you say Y here, all non-root users will be covered under
53178 + a weaker TPE restriction. This is separate from, and in addition to,
53179 + the main TPE options that you have selected elsewhere. Thus, if a
53180 + "trusted" GID is chosen, this restriction applies to even that GID.
53181 + Under this restriction, all non-root users will only be allowed to
53182 + execute files in directories they own that are not group or
53183 + world-writable, or in directories owned by root and writable only by
53184 + root. If the sysctl option is enabled, a sysctl option with name
53185 + "tpe_restrict_all" is created.
53186 +
53187 +config GRKERNSEC_TPE_INVERT
53188 + bool "Invert GID option"
53189 + depends on GRKERNSEC_TPE
53190 + help
53191 + If you say Y here, the group you specify in the TPE configuration will
53192 + decide what group TPE restrictions will be *disabled* for. This
53193 + option is useful if you want TPE restrictions to be applied to most
53194 + users on the system. If the sysctl option is enabled, a sysctl option
53195 + with name "tpe_invert" is created. Unlike other sysctl options, this
53196 + entry will default to on for backward-compatibility.
53197 +
53198 +config GRKERNSEC_TPE_GID
53199 + int "GID for untrusted users"
53200 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53201 + default 1005
53202 + help
53203 + Setting this GID determines what group TPE restrictions will be
53204 + *enabled* for. If the sysctl option is enabled, a sysctl option
53205 + with name "tpe_gid" is created.
53206 +
53207 +config GRKERNSEC_TPE_GID
53208 + int "GID for trusted users"
53209 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53210 + default 1005
53211 + help
53212 + Setting this GID determines what group TPE restrictions will be
53213 + *disabled* for. If the sysctl option is enabled, a sysctl option
53214 + with name "tpe_gid" is created.
53215 +
53216 +endmenu
53217 +menu "Network Protections"
53218 +depends on GRKERNSEC
53219 +
53220 +config GRKERNSEC_RANDNET
53221 + bool "Larger entropy pools"
53222 + help
53223 + If you say Y here, the entropy pools used for many features of Linux
53224 + and grsecurity will be doubled in size. Since several grsecurity
53225 + features use additional randomness, it is recommended that you say Y
53226 + here. Saying Y here has a similar effect as modifying
53227 + /proc/sys/kernel/random/poolsize.
53228 +
53229 +config GRKERNSEC_BLACKHOLE
53230 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53231 + help
53232 + If you say Y here, neither TCP resets nor ICMP
53233 + destination-unreachable packets will be sent in response to packets
53234 + sent to ports for which no associated listening process exists.
53235 + This feature supports both IPv4 and IPv6 and exempts the
53236 + loopback interface from blackholing. Enabling this feature
53237 + makes a host more resilient to DoS attacks and reduces network
53238 + visibility against scanners.
53239 +
53240 + The blackhole feature as-implemented is equivalent to the FreeBSD
53241 + blackhole feature, as it prevents RST responses to all packets, not
53242 + just SYNs. Under most application behavior this causes no
53243 + problems, but applications (like haproxy) may not close certain
53244 + connections in a way that cleanly terminates them on the remote
53245 + end, leaving the remote host in LAST_ACK state. Because of this
53246 + side-effect and to prevent intentional LAST_ACK DoSes, this
53247 + feature also adds automatic mitigation against such attacks.
53248 + The mitigation drastically reduces the amount of time a socket
53249 + can spend in LAST_ACK state. If you're using haproxy and not
53250 + all servers it connects to have this option enabled, consider
53251 + disabling this feature on the haproxy host.
53252 +
53253 + If the sysctl option is enabled, two sysctl options with names
53254 + "ip_blackhole" and "lastack_retries" will be created.
53255 + While "ip_blackhole" takes the standard zero/non-zero on/off
53256 + toggle, "lastack_retries" uses the same kinds of values as
53257 + "tcp_retries1" and "tcp_retries2". The default value of 4
53258 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53259 + state.
53260 +
53261 +config GRKERNSEC_SOCKET
53262 + bool "Socket restrictions"
53263 + help
53264 + If you say Y here, you will be able to choose from several options.
53265 + If you assign a GID on your system and add it to the supplementary
53266 + groups of users you want to restrict socket access to, this patch
53267 + will perform up to three things, based on the option(s) you choose.
53268 +
53269 +config GRKERNSEC_SOCKET_ALL
53270 + bool "Deny any sockets to group"
53271 + depends on GRKERNSEC_SOCKET
53272 + help
53273 + If you say Y here, you will be able to choose a GID whose users will
53274 + be unable to connect to other hosts from your machine or run server
53275 + applications from your machine. If the sysctl option is enabled, a
53276 + sysctl option with name "socket_all" is created.
53277 +
53278 +config GRKERNSEC_SOCKET_ALL_GID
53279 + int "GID to deny all sockets for"
53280 + depends on GRKERNSEC_SOCKET_ALL
53281 + default 1004
53282 + help
53283 + Here you can choose the GID to disable socket access for. Remember to
53284 + add the users you want socket access disabled for to the GID
53285 + specified here. If the sysctl option is enabled, a sysctl option
53286 + with name "socket_all_gid" is created.
53287 +
53288 +config GRKERNSEC_SOCKET_CLIENT
53289 + bool "Deny client sockets to group"
53290 + depends on GRKERNSEC_SOCKET
53291 + help
53292 + If you say Y here, you will be able to choose a GID whose users will
53293 + be unable to connect to other hosts from your machine, but will be
53294 + able to run servers. If this option is enabled, all users in the group
53295 + you specify will have to use passive mode when initiating FTP transfers
53296 + from the shell on your machine. If the sysctl option is enabled, a
53297 + sysctl option with name "socket_client" is created.
53298 +
53299 +config GRKERNSEC_SOCKET_CLIENT_GID
53300 + int "GID to deny client sockets for"
53301 + depends on GRKERNSEC_SOCKET_CLIENT
53302 + default 1003
53303 + help
53304 + Here you can choose the GID to disable client socket access for.
53305 + Remember to add the users you want client socket access disabled for to
53306 + the GID specified here. If the sysctl option is enabled, a sysctl
53307 + option with name "socket_client_gid" is created.
53308 +
53309 +config GRKERNSEC_SOCKET_SERVER
53310 + bool "Deny server sockets to group"
53311 + depends on GRKERNSEC_SOCKET
53312 + help
53313 + If you say Y here, you will be able to choose a GID whose users will
53314 + be unable to run server applications from your machine. If the sysctl
53315 + option is enabled, a sysctl option with name "socket_server" is created.
53316 +
53317 +config GRKERNSEC_SOCKET_SERVER_GID
53318 + int "GID to deny server sockets for"
53319 + depends on GRKERNSEC_SOCKET_SERVER
53320 + default 1002
53321 + help
53322 + Here you can choose the GID to disable server socket access for.
53323 + Remember to add the users you want server socket access disabled for to
53324 + the GID specified here. If the sysctl option is enabled, a sysctl
53325 + option with name "socket_server_gid" is created.
53326 +
53327 +endmenu
53328 +menu "Sysctl support"
53329 +depends on GRKERNSEC && SYSCTL
53330 +
53331 +config GRKERNSEC_SYSCTL
53332 + bool "Sysctl support"
53333 + help
53334 + If you say Y here, you will be able to change the options that
53335 + grsecurity runs with at bootup, without having to recompile your
53336 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53337 + to enable (1) or disable (0) various features. All the sysctl entries
53338 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53339 + All features enabled in the kernel configuration are disabled at boot
53340 + if you do not say Y to the "Turn on features by default" option.
53341 + All options should be set at startup, and the grsec_lock entry should
53342 + be set to a non-zero value after all the options are set.
53343 + *THIS IS EXTREMELY IMPORTANT*
53344 +
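A minimal sketch of the boot-time procedure the help text above describes, written as a small userspace C program that is equivalent to echoing values into /proc/sys/kernel/grsecurity from an init script. The entry names used ("dmesg", "harden_ptrace", "grsec_lock") are ones mentioned in this Kconfig file; adjust them to your configuration:

    /* Sketch: enable some grsecurity sysctls, then lock them.
     * grsec_lock must be set last; no further changes are possible after. */
    #include <stdio.h>

    static int grsec_set(const char *entry, int value)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%d\n", value);
        return fclose(f);
    }

    int main(void)
    {
        grsec_set("dmesg", 1);
        grsec_set("harden_ptrace", 1);
        return grsec_set("grsec_lock", 1);
    }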
53345 +config GRKERNSEC_SYSCTL_DISTRO
53346 + bool "Extra sysctl support for distro makers (READ HELP)"
53347 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53348 + help
53349 + If you say Y here, additional sysctl options will be created
53350 + for features that affect processes running as root. Therefore,
53351 + it is critical when using this option that the grsec_lock entry be
53352 + enabled after boot. Only distributions that ship prebuilt kernel
53353 + packages with this option enabled and that can ensure grsec_lock is
53354 + set after boot should use this option.
53355 + *Failure to set grsec_lock after boot makes all grsec features
53356 + this option covers useless*
53357 +
53358 + Currently this option creates the following sysctl entries:
53359 + "Disable Privileged I/O": "disable_priv_io"
53360 +
53361 +config GRKERNSEC_SYSCTL_ON
53362 + bool "Turn on features by default"
53363 + depends on GRKERNSEC_SYSCTL
53364 + help
53365 + If you say Y here, instead of having all features enabled in the
53366 + kernel configuration disabled at boot time, the features will be
53367 + enabled at boot time. It is recommended you say Y here unless
53368 + there is some reason you would want all sysctl-tunable features to
53369 + be disabled by default. As mentioned elsewhere, it is important
53370 + to enable the grsec_lock entry once you have finished modifying
53371 + the sysctl entries.
53372 +
53373 +endmenu
53374 +menu "Logging Options"
53375 +depends on GRKERNSEC
53376 +
53377 +config GRKERNSEC_FLOODTIME
53378 + int "Seconds in between log messages (minimum)"
53379 + default 10
53380 + help
53381 + This option allows you to enforce a minimum number of seconds between
53382 + grsecurity log messages. The default should be suitable for most
53383 + people; however, if you choose to change it, choose a value small enough
53384 + to allow informative logs to be produced, but large enough to
53385 + prevent flooding.
53386 +
53387 +config GRKERNSEC_FLOODBURST
53388 + int "Number of messages in a burst (maximum)"
53389 + default 4
53390 + help
53391 + This option allows you to choose the maximum number of messages allowed
53392 + within the flood time interval you chose in a separate option. The
53393 + default should be suitable for most people; however, if you find that
53394 + many of your logs are being interpreted as flooding, you may want to
53395 + raise this value.
53396 +
53397 +endmenu
53398 +
53399 +endmenu
53400 diff -urNp linux-2.6.32.41/grsecurity/Makefile linux-2.6.32.41/grsecurity/Makefile
53401 --- linux-2.6.32.41/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53402 +++ linux-2.6.32.41/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53403 @@ -0,0 +1,33 @@
53404 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53405 +# between 2001 and 2009 it was completely redesigned by Brad Spengler
53406 +# into an RBAC system
53407 +#
53408 +# All code in this directory and various hooks inserted throughout the kernel
53409 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53410 +# under the GPL v2 or higher
53411 +
53412 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53413 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53414 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53415 +
53416 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53417 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53418 + gracl_learn.o grsec_log.o
53419 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53420 +
53421 +ifdef CONFIG_NET
53422 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53423 +endif
53424 +
53425 +ifndef CONFIG_GRKERNSEC
53426 +obj-y += grsec_disabled.o
53427 +endif
53428 +
53429 +ifdef CONFIG_GRKERNSEC_HIDESYM
53430 +extra-y := grsec_hidesym.o
53431 +$(obj)/grsec_hidesym.o:
53432 + @-chmod -f 500 /boot
53433 + @-chmod -f 500 /lib/modules
53434 + @-chmod -f 700 .
53435 + @echo ' grsec: protected kernel image paths'
53436 +endif
53437 diff -urNp linux-2.6.32.41/include/acpi/acpi_drivers.h linux-2.6.32.41/include/acpi/acpi_drivers.h
53438 --- linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53439 +++ linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53440 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53441 Dock Station
53442 -------------------------------------------------------------------------- */
53443 struct acpi_dock_ops {
53444 - acpi_notify_handler handler;
53445 - acpi_notify_handler uevent;
53446 + const acpi_notify_handler handler;
53447 + const acpi_notify_handler uevent;
53448 };
53449
53450 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53451 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53452 extern int register_dock_notifier(struct notifier_block *nb);
53453 extern void unregister_dock_notifier(struct notifier_block *nb);
53454 extern int register_hotplug_dock_device(acpi_handle handle,
53455 - struct acpi_dock_ops *ops,
53456 + const struct acpi_dock_ops *ops,
53457 void *context);
53458 extern void unregister_hotplug_dock_device(acpi_handle handle);
53459 #else
53460 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53461 {
53462 }
53463 static inline int register_hotplug_dock_device(acpi_handle handle,
53464 - struct acpi_dock_ops *ops,
53465 + const struct acpi_dock_ops *ops,
53466 void *context)
53467 {
53468 return -ENODEV;
53469 diff -urNp linux-2.6.32.41/include/asm-generic/atomic-long.h linux-2.6.32.41/include/asm-generic/atomic-long.h
53470 --- linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53471 +++ linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53472 @@ -22,6 +22,12 @@
53473
53474 typedef atomic64_t atomic_long_t;
53475
53476 +#ifdef CONFIG_PAX_REFCOUNT
53477 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53478 +#else
53479 +typedef atomic64_t atomic_long_unchecked_t;
53480 +#endif
53481 +
53482 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53483
53484 static inline long atomic_long_read(atomic_long_t *l)
53485 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53486 return (long)atomic64_read(v);
53487 }
53488
53489 +#ifdef CONFIG_PAX_REFCOUNT
53490 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53491 +{
53492 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53493 +
53494 + return (long)atomic64_read_unchecked(v);
53495 +}
53496 +#endif
53497 +
53498 static inline void atomic_long_set(atomic_long_t *l, long i)
53499 {
53500 atomic64_t *v = (atomic64_t *)l;
53501 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53502 atomic64_set(v, i);
53503 }
53504
53505 +#ifdef CONFIG_PAX_REFCOUNT
53506 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53507 +{
53508 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53509 +
53510 + atomic64_set_unchecked(v, i);
53511 +}
53512 +#endif
53513 +
53514 static inline void atomic_long_inc(atomic_long_t *l)
53515 {
53516 atomic64_t *v = (atomic64_t *)l;
53517 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53518 atomic64_inc(v);
53519 }
53520
53521 +#ifdef CONFIG_PAX_REFCOUNT
53522 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53523 +{
53524 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53525 +
53526 + atomic64_inc_unchecked(v);
53527 +}
53528 +#endif
53529 +
53530 static inline void atomic_long_dec(atomic_long_t *l)
53531 {
53532 atomic64_t *v = (atomic64_t *)l;
53533 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53534 atomic64_dec(v);
53535 }
53536
53537 +#ifdef CONFIG_PAX_REFCOUNT
53538 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53539 +{
53540 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53541 +
53542 + atomic64_dec_unchecked(v);
53543 +}
53544 +#endif
53545 +
53546 static inline void atomic_long_add(long i, atomic_long_t *l)
53547 {
53548 atomic64_t *v = (atomic64_t *)l;
53549 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53550 atomic64_add(i, v);
53551 }
53552
53553 +#ifdef CONFIG_PAX_REFCOUNT
53554 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53555 +{
53556 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53557 +
53558 + atomic64_add_unchecked(i, v);
53559 +}
53560 +#endif
53561 +
53562 static inline void atomic_long_sub(long i, atomic_long_t *l)
53563 {
53564 atomic64_t *v = (atomic64_t *)l;
53565 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53566 return (long)atomic64_inc_return(v);
53567 }
53568
53569 +#ifdef CONFIG_PAX_REFCOUNT
53570 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53571 +{
53572 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53573 +
53574 + return (long)atomic64_inc_return_unchecked(v);
53575 +}
53576 +#endif
53577 +
53578 static inline long atomic_long_dec_return(atomic_long_t *l)
53579 {
53580 atomic64_t *v = (atomic64_t *)l;
53581 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53582
53583 typedef atomic_t atomic_long_t;
53584
53585 +#ifdef CONFIG_PAX_REFCOUNT
53586 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53587 +#else
53588 +typedef atomic_t atomic_long_unchecked_t;
53589 +#endif
53590 +
53591 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53592 static inline long atomic_long_read(atomic_long_t *l)
53593 {
53594 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53595 return (long)atomic_read(v);
53596 }
53597
53598 +#ifdef CONFIG_PAX_REFCOUNT
53599 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53600 +{
53601 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53602 +
53603 + return (long)atomic_read_unchecked(v);
53604 +}
53605 +#endif
53606 +
53607 static inline void atomic_long_set(atomic_long_t *l, long i)
53608 {
53609 atomic_t *v = (atomic_t *)l;
53610 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53611 atomic_set(v, i);
53612 }
53613
53614 +#ifdef CONFIG_PAX_REFCOUNT
53615 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53616 +{
53617 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53618 +
53619 + atomic_set_unchecked(v, i);
53620 +}
53621 +#endif
53622 +
53623 static inline void atomic_long_inc(atomic_long_t *l)
53624 {
53625 atomic_t *v = (atomic_t *)l;
53626 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53627 atomic_inc(v);
53628 }
53629
53630 +#ifdef CONFIG_PAX_REFCOUNT
53631 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53632 +{
53633 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53634 +
53635 + atomic_inc_unchecked(v);
53636 +}
53637 +#endif
53638 +
53639 static inline void atomic_long_dec(atomic_long_t *l)
53640 {
53641 atomic_t *v = (atomic_t *)l;
53642 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53643 atomic_dec(v);
53644 }
53645
53646 +#ifdef CONFIG_PAX_REFCOUNT
53647 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53648 +{
53649 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53650 +
53651 + atomic_dec_unchecked(v);
53652 +}
53653 +#endif
53654 +
53655 static inline void atomic_long_add(long i, atomic_long_t *l)
53656 {
53657 atomic_t *v = (atomic_t *)l;
53658 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53659 atomic_add(i, v);
53660 }
53661
53662 +#ifdef CONFIG_PAX_REFCOUNT
53663 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53664 +{
53665 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53666 +
53667 + atomic_add_unchecked(i, v);
53668 +}
53669 +#endif
53670 +
53671 static inline void atomic_long_sub(long i, atomic_long_t *l)
53672 {
53673 atomic_t *v = (atomic_t *)l;
53674 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53675 return (long)atomic_inc_return(v);
53676 }
53677
53678 +#ifdef CONFIG_PAX_REFCOUNT
53679 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53680 +{
53681 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53682 +
53683 + return (long)atomic_inc_return_unchecked(v);
53684 +}
53685 +#endif
53686 +
53687 static inline long atomic_long_dec_return(atomic_long_t *l)
53688 {
53689 atomic_t *v = (atomic_t *)l;
53690 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53691
53692 #endif /* BITS_PER_LONG == 64 */
53693
53694 +#ifdef CONFIG_PAX_REFCOUNT
53695 +static inline void pax_refcount_needs_these_functions(void)
53696 +{
53697 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53698 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53699 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53700 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53701 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53702 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53703 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53704 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53705 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53706 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53707 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53708 +
53709 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53710 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53711 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53712 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53713 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53714 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53715 +}
53716 +#else
53717 +#define atomic_read_unchecked(v) atomic_read(v)
53718 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53719 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53720 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53721 +#define atomic_inc_unchecked(v) atomic_inc(v)
53722 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53723 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53724 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53725 +#define atomic_dec_unchecked(v) atomic_dec(v)
53726 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53727 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53728 +
53729 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53730 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53731 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53732 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53733 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53734 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53735 +#endif
53736 +
53737 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
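The _unchecked variants introduced in this header exist so that counters that are allowed to wrap (statistics, generation numbers) can opt out of PAX_REFCOUNT's overflow detection, while real reference counts keep it; without PAX_REFCOUNT they fall back to the plain operations, as the #defines above show. A minimal sketch of the intended split, assuming a kernel built with this patch (the struct and field names are made up for illustration):

    /* Sketch: a real refcount vs. a wrap-tolerant statistics counter. */
    #include <linux/types.h>
    #include <asm/atomic.h>

    struct example_stats {
        atomic_t            refcnt;      /* overflow detected under PAX_REFCOUNT */
        atomic_unchecked_t  tx_packets;  /* may wrap; checking would false-positive */
    };

    static void example_account_tx(struct example_stats *s)
    {
        atomic_inc(&s->refcnt);                /* overflow is detected and reported */
        atomic_inc_unchecked(&s->tx_packets);  /* plain increment, never trapped */
        atomic_dec(&s->refcnt);
    }

This mirrors the conversions elsewhere in the patch, e.g. blk_trace's "dropped" counter and the drm statistics arrays becoming atomic_unchecked_t.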
53738 diff -urNp linux-2.6.32.41/include/asm-generic/cache.h linux-2.6.32.41/include/asm-generic/cache.h
53739 --- linux-2.6.32.41/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53740 +++ linux-2.6.32.41/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53741 @@ -6,7 +6,7 @@
53742 * cache lines need to provide their own cache.h.
53743 */
53744
53745 -#define L1_CACHE_SHIFT 5
53746 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53747 +#define L1_CACHE_SHIFT 5U
53748 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53749
53750 #endif /* __ASM_GENERIC_CACHE_H */
53751 diff -urNp linux-2.6.32.41/include/asm-generic/dma-mapping-common.h linux-2.6.32.41/include/asm-generic/dma-mapping-common.h
53752 --- linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53753 +++ linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53754 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53755 enum dma_data_direction dir,
53756 struct dma_attrs *attrs)
53757 {
53758 - struct dma_map_ops *ops = get_dma_ops(dev);
53759 + const struct dma_map_ops *ops = get_dma_ops(dev);
53760 dma_addr_t addr;
53761
53762 kmemcheck_mark_initialized(ptr, size);
53763 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53764 enum dma_data_direction dir,
53765 struct dma_attrs *attrs)
53766 {
53767 - struct dma_map_ops *ops = get_dma_ops(dev);
53768 + const struct dma_map_ops *ops = get_dma_ops(dev);
53769
53770 BUG_ON(!valid_dma_direction(dir));
53771 if (ops->unmap_page)
53772 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53773 int nents, enum dma_data_direction dir,
53774 struct dma_attrs *attrs)
53775 {
53776 - struct dma_map_ops *ops = get_dma_ops(dev);
53777 + const struct dma_map_ops *ops = get_dma_ops(dev);
53778 int i, ents;
53779 struct scatterlist *s;
53780
53781 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53782 int nents, enum dma_data_direction dir,
53783 struct dma_attrs *attrs)
53784 {
53785 - struct dma_map_ops *ops = get_dma_ops(dev);
53786 + const struct dma_map_ops *ops = get_dma_ops(dev);
53787
53788 BUG_ON(!valid_dma_direction(dir));
53789 debug_dma_unmap_sg(dev, sg, nents, dir);
53790 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53791 size_t offset, size_t size,
53792 enum dma_data_direction dir)
53793 {
53794 - struct dma_map_ops *ops = get_dma_ops(dev);
53795 + const struct dma_map_ops *ops = get_dma_ops(dev);
53796 dma_addr_t addr;
53797
53798 kmemcheck_mark_initialized(page_address(page) + offset, size);
53799 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53800 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53801 size_t size, enum dma_data_direction dir)
53802 {
53803 - struct dma_map_ops *ops = get_dma_ops(dev);
53804 + const struct dma_map_ops *ops = get_dma_ops(dev);
53805
53806 BUG_ON(!valid_dma_direction(dir));
53807 if (ops->unmap_page)
53808 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53809 size_t size,
53810 enum dma_data_direction dir)
53811 {
53812 - struct dma_map_ops *ops = get_dma_ops(dev);
53813 + const struct dma_map_ops *ops = get_dma_ops(dev);
53814
53815 BUG_ON(!valid_dma_direction(dir));
53816 if (ops->sync_single_for_cpu)
53817 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53818 dma_addr_t addr, size_t size,
53819 enum dma_data_direction dir)
53820 {
53821 - struct dma_map_ops *ops = get_dma_ops(dev);
53822 + const struct dma_map_ops *ops = get_dma_ops(dev);
53823
53824 BUG_ON(!valid_dma_direction(dir));
53825 if (ops->sync_single_for_device)
53826 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53827 size_t size,
53828 enum dma_data_direction dir)
53829 {
53830 - struct dma_map_ops *ops = get_dma_ops(dev);
53831 + const struct dma_map_ops *ops = get_dma_ops(dev);
53832
53833 BUG_ON(!valid_dma_direction(dir));
53834 if (ops->sync_single_range_for_cpu) {
53835 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53836 size_t size,
53837 enum dma_data_direction dir)
53838 {
53839 - struct dma_map_ops *ops = get_dma_ops(dev);
53840 + const struct dma_map_ops *ops = get_dma_ops(dev);
53841
53842 BUG_ON(!valid_dma_direction(dir));
53843 if (ops->sync_single_range_for_device) {
53844 @@ -155,7 +155,7 @@ static inline void
53845 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53846 int nelems, enum dma_data_direction dir)
53847 {
53848 - struct dma_map_ops *ops = get_dma_ops(dev);
53849 + const struct dma_map_ops *ops = get_dma_ops(dev);
53850
53851 BUG_ON(!valid_dma_direction(dir));
53852 if (ops->sync_sg_for_cpu)
53853 @@ -167,7 +167,7 @@ static inline void
53854 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53855 int nelems, enum dma_data_direction dir)
53856 {
53857 - struct dma_map_ops *ops = get_dma_ops(dev);
53858 + const struct dma_map_ops *ops = get_dma_ops(dev);
53859
53860 BUG_ON(!valid_dma_direction(dir));
53861 if (ops->sync_sg_for_device)
53862 diff -urNp linux-2.6.32.41/include/asm-generic/futex.h linux-2.6.32.41/include/asm-generic/futex.h
53863 --- linux-2.6.32.41/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53864 +++ linux-2.6.32.41/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53865 @@ -6,7 +6,7 @@
53866 #include <asm/errno.h>
53867
53868 static inline int
53869 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53870 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53871 {
53872 int op = (encoded_op >> 28) & 7;
53873 int cmp = (encoded_op >> 24) & 15;
53874 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53875 }
53876
53877 static inline int
53878 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53879 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53880 {
53881 return -ENOSYS;
53882 }
53883 diff -urNp linux-2.6.32.41/include/asm-generic/int-l64.h linux-2.6.32.41/include/asm-generic/int-l64.h
53884 --- linux-2.6.32.41/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53885 +++ linux-2.6.32.41/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53886 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53887 typedef signed long s64;
53888 typedef unsigned long u64;
53889
53890 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53891 +
53892 #define S8_C(x) x
53893 #define U8_C(x) x ## U
53894 #define S16_C(x) x
53895 diff -urNp linux-2.6.32.41/include/asm-generic/int-ll64.h linux-2.6.32.41/include/asm-generic/int-ll64.h
53896 --- linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
53897 +++ linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
53898 @@ -51,6 +51,8 @@ typedef unsigned int u32;
53899 typedef signed long long s64;
53900 typedef unsigned long long u64;
53901
53902 +typedef unsigned long long intoverflow_t;
53903 +
53904 #define S8_C(x) x
53905 #define U8_C(x) x ## U
53906 #define S16_C(x) x
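intoverflow_t is simply an integer type wide enough to hold the product of two size values without wrapping: 128-bit (TI mode) where longs are 64-bit, unsigned long long otherwise. A hedged sketch of the kind of pre-allocation size check this enables; the wrapper name is hypothetical, not a function from the patch:

    /* Sketch: use the wider type to detect multiplication overflow
     * before handing a size to the allocator. */
    #include <linux/kernel.h>
    #include <linux/slab.h>

    static inline void *example_kmalloc_array(size_t n, size_t size, gfp_t flags)
    {
        if ((intoverflow_t)n * size > ULONG_MAX)
            return NULL;    /* product would wrap a size_t */
        return kmalloc(n * size, flags);
    }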
53907 diff -urNp linux-2.6.32.41/include/asm-generic/kmap_types.h linux-2.6.32.41/include/asm-generic/kmap_types.h
53908 --- linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
53909 +++ linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
53910 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
53911 KMAP_D(16) KM_IRQ_PTE,
53912 KMAP_D(17) KM_NMI,
53913 KMAP_D(18) KM_NMI_PTE,
53914 -KMAP_D(19) KM_TYPE_NR
53915 +KMAP_D(19) KM_CLEARPAGE,
53916 +KMAP_D(20) KM_TYPE_NR
53917 };
53918
53919 #undef KMAP_D
53920 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable.h linux-2.6.32.41/include/asm-generic/pgtable.h
53921 --- linux-2.6.32.41/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
53922 +++ linux-2.6.32.41/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
53923 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
53924 unsigned long size);
53925 #endif
53926
53927 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
53928 +static inline unsigned long pax_open_kernel(void) { return 0; }
53929 +#endif
53930 +
53931 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
53932 +static inline unsigned long pax_close_kernel(void) { return 0; }
53933 +#endif
53934 +
53935 #endif /* !__ASSEMBLY__ */
53936
53937 #endif /* _ASM_GENERIC_PGTABLE_H */
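pax_open_kernel()/pax_close_kernel() are the hooks PaX uses to lift write protection briefly when the kernel must modify data it otherwise keeps read-only; the generic stubs added above make such call sites compile away on architectures that provide no implementation. A hedged sketch of the calling pattern, with an illustrative variable (real call sites in the patch wrap writes to things like page tables and constified ops structures):

    /* Sketch: the open/write/close pattern around normally read-only data. */
    #include <linux/cache.h>
    #include <asm/pgtable.h>

    static int example_table[4] __read_only = { 0, 1, 2, 3 };

    static void example_update(int idx, int val)
    {
        pax_open_kernel();          /* lift write protection (no-op for the generic stub) */
        example_table[idx] = val;
        pax_close_kernel();         /* restore protection */
    }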
53938 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h
53939 --- linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
53940 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
53941 @@ -1,14 +1,19 @@
53942 #ifndef _PGTABLE_NOPMD_H
53943 #define _PGTABLE_NOPMD_H
53944
53945 -#ifndef __ASSEMBLY__
53946 -
53947 #include <asm-generic/pgtable-nopud.h>
53948
53949 -struct mm_struct;
53950 -
53951 #define __PAGETABLE_PMD_FOLDED
53952
53953 +#define PMD_SHIFT PUD_SHIFT
53954 +#define PTRS_PER_PMD 1
53955 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
53956 +#define PMD_MASK (~(PMD_SIZE-1))
53957 +
53958 +#ifndef __ASSEMBLY__
53959 +
53960 +struct mm_struct;
53961 +
53962 /*
53963 * Having the pmd type consist of a pud gets the size right, and allows
53964 * us to conceptually access the pud entry that this pmd is folded into
53965 @@ -16,11 +21,6 @@ struct mm_struct;
53966 */
53967 typedef struct { pud_t pud; } pmd_t;
53968
53969 -#define PMD_SHIFT PUD_SHIFT
53970 -#define PTRS_PER_PMD 1
53971 -#define PMD_SIZE (1UL << PMD_SHIFT)
53972 -#define PMD_MASK (~(PMD_SIZE-1))
53973 -
53974 /*
53975 * The "pud_xxx()" functions here are trivial for a folded two-level
53976 * setup: the pmd is never bad, and a pmd always exists (as it's folded
53977 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopud.h linux-2.6.32.41/include/asm-generic/pgtable-nopud.h
53978 --- linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
53979 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
53980 @@ -1,10 +1,15 @@
53981 #ifndef _PGTABLE_NOPUD_H
53982 #define _PGTABLE_NOPUD_H
53983
53984 -#ifndef __ASSEMBLY__
53985 -
53986 #define __PAGETABLE_PUD_FOLDED
53987
53988 +#define PUD_SHIFT PGDIR_SHIFT
53989 +#define PTRS_PER_PUD 1
53990 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
53991 +#define PUD_MASK (~(PUD_SIZE-1))
53992 +
53993 +#ifndef __ASSEMBLY__
53994 +
53995 /*
53996 * Having the pud type consist of a pgd gets the size right, and allows
53997 * us to conceptually access the pgd entry that this pud is folded into
53998 @@ -12,11 +17,6 @@
53999 */
54000 typedef struct { pgd_t pgd; } pud_t;
54001
54002 -#define PUD_SHIFT PGDIR_SHIFT
54003 -#define PTRS_PER_PUD 1
54004 -#define PUD_SIZE (1UL << PUD_SHIFT)
54005 -#define PUD_MASK (~(PUD_SIZE-1))
54006 -
54007 /*
54008 * The "pgd_xxx()" functions here are trivial for a folded two-level
54009 * setup: the pud is never bad, and a pud always exists (as it's folded
54010 diff -urNp linux-2.6.32.41/include/asm-generic/vmlinux.lds.h linux-2.6.32.41/include/asm-generic/vmlinux.lds.h
54011 --- linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54012 +++ linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54013 @@ -199,6 +199,7 @@
54014 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54015 VMLINUX_SYMBOL(__start_rodata) = .; \
54016 *(.rodata) *(.rodata.*) \
54017 + *(.data.read_only) \
54018 *(__vermagic) /* Kernel version magic */ \
54019 *(__markers_strings) /* Markers: strings */ \
54020 *(__tracepoints_strings)/* Tracepoints: strings */ \
54021 @@ -656,22 +657,24 @@
54022 * section in the linker script will go there too. @phdr should have
54023 * a leading colon.
54024 *
54025 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54026 + * Note that this macros defines per_cpu_load as an absolute symbol.
54027 * If there is no need to put the percpu section at a predetermined
54028 * address, use PERCPU().
54029 */
54030 #define PERCPU_VADDR(vaddr, phdr) \
54031 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54032 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54033 + per_cpu_load = .; \
54034 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54035 - LOAD_OFFSET) { \
54036 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54037 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54038 *(.data.percpu.first) \
54039 - *(.data.percpu.page_aligned) \
54040 *(.data.percpu) \
54041 + . = ALIGN(PAGE_SIZE); \
54042 + *(.data.percpu.page_aligned) \
54043 *(.data.percpu.shared_aligned) \
54044 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54045 } phdr \
54046 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54047 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54048
54049 /**
54050 * PERCPU - define output section for percpu area, simple version
54051 diff -urNp linux-2.6.32.41/include/drm/drmP.h linux-2.6.32.41/include/drm/drmP.h
54052 --- linux-2.6.32.41/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54053 +++ linux-2.6.32.41/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54054 @@ -71,6 +71,7 @@
54055 #include <linux/workqueue.h>
54056 #include <linux/poll.h>
54057 #include <asm/pgalloc.h>
54058 +#include <asm/local.h>
54059 #include "drm.h"
54060
54061 #include <linux/idr.h>
54062 @@ -814,7 +815,7 @@ struct drm_driver {
54063 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54064
54065 /* Driver private ops for this object */
54066 - struct vm_operations_struct *gem_vm_ops;
54067 + const struct vm_operations_struct *gem_vm_ops;
54068
54069 int major;
54070 int minor;
54071 @@ -917,7 +918,7 @@ struct drm_device {
54072
54073 /** \name Usage Counters */
54074 /*@{ */
54075 - int open_count; /**< Outstanding files open */
54076 + local_t open_count; /**< Outstanding files open */
54077 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54078 atomic_t vma_count; /**< Outstanding vma areas open */
54079 int buf_use; /**< Buffers in use -- cannot alloc */
54080 @@ -928,7 +929,7 @@ struct drm_device {
54081 /*@{ */
54082 unsigned long counters;
54083 enum drm_stat_type types[15];
54084 - atomic_t counts[15];
54085 + atomic_unchecked_t counts[15];
54086 /*@} */
54087
54088 struct list_head filelist;
54089 @@ -1016,7 +1017,7 @@ struct drm_device {
54090 struct pci_controller *hose;
54091 #endif
54092 struct drm_sg_mem *sg; /**< Scatter gather memory */
54093 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54094 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54095 void *dev_private; /**< device private data */
54096 void *mm_private;
54097 struct address_space *dev_mapping;
54098 @@ -1042,11 +1043,11 @@ struct drm_device {
54099 spinlock_t object_name_lock;
54100 struct idr object_name_idr;
54101 atomic_t object_count;
54102 - atomic_t object_memory;
54103 + atomic_unchecked_t object_memory;
54104 atomic_t pin_count;
54105 - atomic_t pin_memory;
54106 + atomic_unchecked_t pin_memory;
54107 atomic_t gtt_count;
54108 - atomic_t gtt_memory;
54109 + atomic_unchecked_t gtt_memory;
54110 uint32_t gtt_total;
54111 uint32_t invalidate_domains; /* domains pending invalidation */
54112 uint32_t flush_domains; /* domains pending flush */
54113 diff -urNp linux-2.6.32.41/include/linux/a.out.h linux-2.6.32.41/include/linux/a.out.h
54114 --- linux-2.6.32.41/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54115 +++ linux-2.6.32.41/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54116 @@ -39,6 +39,14 @@ enum machine_type {
54117 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54118 };
54119
54120 +/* Constants for the N_FLAGS field */
54121 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54122 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54123 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54124 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54125 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54126 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54127 +
54128 #if !defined (N_MAGIC)
54129 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54130 #endif
54131 diff -urNp linux-2.6.32.41/include/linux/atmdev.h linux-2.6.32.41/include/linux/atmdev.h
54132 --- linux-2.6.32.41/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54133 +++ linux-2.6.32.41/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54134 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54135 #endif
54136
54137 struct k_atm_aal_stats {
54138 -#define __HANDLE_ITEM(i) atomic_t i
54139 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54140 __AAL_STAT_ITEMS
54141 #undef __HANDLE_ITEM
54142 };
54143 diff -urNp linux-2.6.32.41/include/linux/backlight.h linux-2.6.32.41/include/linux/backlight.h
54144 --- linux-2.6.32.41/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54145 +++ linux-2.6.32.41/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54146 @@ -36,18 +36,18 @@ struct backlight_device;
54147 struct fb_info;
54148
54149 struct backlight_ops {
54150 - unsigned int options;
54151 + const unsigned int options;
54152
54153 #define BL_CORE_SUSPENDRESUME (1 << 0)
54154
54155 /* Notify the backlight driver some property has changed */
54156 - int (*update_status)(struct backlight_device *);
54157 + int (* const update_status)(struct backlight_device *);
54158 /* Return the current backlight brightness (accounting for power,
54159 fb_blank etc.) */
54160 - int (*get_brightness)(struct backlight_device *);
54161 + int (* const get_brightness)(struct backlight_device *);
54162 /* Check if given framebuffer device is the one bound to this backlight;
54163 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54164 - int (*check_fb)(struct fb_info *);
54165 + int (* const check_fb)(struct fb_info *);
54166 };
54167
54168 /* This structure defines all the properties of a backlight */
54169 @@ -86,7 +86,7 @@ struct backlight_device {
54170 registered this device has been unloaded, and if class_get_devdata()
54171 points to something in the body of that driver, it is also invalid. */
54172 struct mutex ops_lock;
54173 - struct backlight_ops *ops;
54174 + const struct backlight_ops *ops;
54175
54176 /* The framebuffer notifier block */
54177 struct notifier_block fb_notif;
54178 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54179 }
54180
54181 extern struct backlight_device *backlight_device_register(const char *name,
54182 - struct device *dev, void *devdata, struct backlight_ops *ops);
54183 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54184 extern void backlight_device_unregister(struct backlight_device *bd);
54185 extern void backlight_force_update(struct backlight_device *bd,
54186 enum backlight_update_reason reason);
54187 diff -urNp linux-2.6.32.41/include/linux/binfmts.h linux-2.6.32.41/include/linux/binfmts.h
54188 --- linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54189 +++ linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54190 @@ -83,6 +83,7 @@ struct linux_binfmt {
54191 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54192 int (*load_shlib)(struct file *);
54193 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54194 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54195 unsigned long min_coredump; /* minimal dump size */
54196 int hasvdso;
54197 };
54198 diff -urNp linux-2.6.32.41/include/linux/blkdev.h linux-2.6.32.41/include/linux/blkdev.h
54199 --- linux-2.6.32.41/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54200 +++ linux-2.6.32.41/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54201 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54202 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54203
54204 struct block_device_operations {
54205 - int (*open) (struct block_device *, fmode_t);
54206 - int (*release) (struct gendisk *, fmode_t);
54207 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54208 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54209 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54210 - int (*direct_access) (struct block_device *, sector_t,
54211 + int (* const open) (struct block_device *, fmode_t);
54212 + int (* const release) (struct gendisk *, fmode_t);
54213 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54214 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54215 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54216 + int (* const direct_access) (struct block_device *, sector_t,
54217 void **, unsigned long *);
54218 - int (*media_changed) (struct gendisk *);
54219 - unsigned long long (*set_capacity) (struct gendisk *,
54220 + int (* const media_changed) (struct gendisk *);
54221 + unsigned long long (* const set_capacity) (struct gendisk *,
54222 unsigned long long);
54223 - int (*revalidate_disk) (struct gendisk *);
54224 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54225 - struct module *owner;
54226 + int (* const revalidate_disk) (struct gendisk *);
54227 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54228 + struct module * const owner;
54229 };
54230
54231 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54232 diff -urNp linux-2.6.32.41/include/linux/blktrace_api.h linux-2.6.32.41/include/linux/blktrace_api.h
54233 --- linux-2.6.32.41/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54234 +++ linux-2.6.32.41/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54235 @@ -160,7 +160,7 @@ struct blk_trace {
54236 struct dentry *dir;
54237 struct dentry *dropped_file;
54238 struct dentry *msg_file;
54239 - atomic_t dropped;
54240 + atomic_unchecked_t dropped;
54241 };
54242
54243 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54244 diff -urNp linux-2.6.32.41/include/linux/byteorder/little_endian.h linux-2.6.32.41/include/linux/byteorder/little_endian.h
54245 --- linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54246 +++ linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54247 @@ -42,51 +42,51 @@
54248
54249 static inline __le64 __cpu_to_le64p(const __u64 *p)
54250 {
54251 - return (__force __le64)*p;
54252 + return (__force const __le64)*p;
54253 }
54254 static inline __u64 __le64_to_cpup(const __le64 *p)
54255 {
54256 - return (__force __u64)*p;
54257 + return (__force const __u64)*p;
54258 }
54259 static inline __le32 __cpu_to_le32p(const __u32 *p)
54260 {
54261 - return (__force __le32)*p;
54262 + return (__force const __le32)*p;
54263 }
54264 static inline __u32 __le32_to_cpup(const __le32 *p)
54265 {
54266 - return (__force __u32)*p;
54267 + return (__force const __u32)*p;
54268 }
54269 static inline __le16 __cpu_to_le16p(const __u16 *p)
54270 {
54271 - return (__force __le16)*p;
54272 + return (__force const __le16)*p;
54273 }
54274 static inline __u16 __le16_to_cpup(const __le16 *p)
54275 {
54276 - return (__force __u16)*p;
54277 + return (__force const __u16)*p;
54278 }
54279 static inline __be64 __cpu_to_be64p(const __u64 *p)
54280 {
54281 - return (__force __be64)__swab64p(p);
54282 + return (__force const __be64)__swab64p(p);
54283 }
54284 static inline __u64 __be64_to_cpup(const __be64 *p)
54285 {
54286 - return __swab64p((__u64 *)p);
54287 + return __swab64p((const __u64 *)p);
54288 }
54289 static inline __be32 __cpu_to_be32p(const __u32 *p)
54290 {
54291 - return (__force __be32)__swab32p(p);
54292 + return (__force const __be32)__swab32p(p);
54293 }
54294 static inline __u32 __be32_to_cpup(const __be32 *p)
54295 {
54296 - return __swab32p((__u32 *)p);
54297 + return __swab32p((const __u32 *)p);
54298 }
54299 static inline __be16 __cpu_to_be16p(const __u16 *p)
54300 {
54301 - return (__force __be16)__swab16p(p);
54302 + return (__force const __be16)__swab16p(p);
54303 }
54304 static inline __u16 __be16_to_cpup(const __be16 *p)
54305 {
54306 - return __swab16p((__u16 *)p);
54307 + return __swab16p((const __u16 *)p);
54308 }
54309 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54310 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54311 diff -urNp linux-2.6.32.41/include/linux/cache.h linux-2.6.32.41/include/linux/cache.h
54312 --- linux-2.6.32.41/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54313 +++ linux-2.6.32.41/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54314 @@ -16,6 +16,10 @@
54315 #define __read_mostly
54316 #endif
54317
54318 +#ifndef __read_only
54319 +#define __read_only __read_mostly
54320 +#endif
54321 +
54322 #ifndef ____cacheline_aligned
54323 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54324 #endif
54325 diff -urNp linux-2.6.32.41/include/linux/capability.h linux-2.6.32.41/include/linux/capability.h
54326 --- linux-2.6.32.41/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54327 +++ linux-2.6.32.41/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54328 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54329 (security_real_capable_noaudit((t), (cap)) == 0)
54330
54331 extern int capable(int cap);
54332 +int capable_nolog(int cap);
54333
54334 /* audit system wants to get cap info from files as well */
54335 struct dentry;
54336 diff -urNp linux-2.6.32.41/include/linux/compiler-gcc4.h linux-2.6.32.41/include/linux/compiler-gcc4.h
54337 --- linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54338 +++ linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54339 @@ -36,4 +36,8 @@
54340 the kernel context */
54341 #define __cold __attribute__((__cold__))
54342
54343 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54344 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54345 +#define __bos0(ptr) __bos((ptr), 0)
54346 +#define __bos1(ptr) __bos((ptr), 1)
54347 #endif
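__alloc_size lets gcc 4.x associate the size of a returned buffer with the allocating function's argument, and the __bos()/__bos0()/__bos1() shorthands read that size back through __builtin_object_size(). A hedged sketch of what the annotation buys, using made-up names rather than the allocators the patch actually annotates:

    /* Sketch: annotate an allocator so gcc knows how big the object is. */
    #include <linux/compiler.h>
    #include <linux/slab.h>

    static void *example_alloc(unsigned long len) __alloc_size(1);

    static void *example_alloc(unsigned long len)
    {
        return kmalloc(len, GFP_KERNEL);
    }

    static unsigned long example_known_size(void)
    {
        void *p = example_alloc(64);
        /* 64 when gcc can derive the size from the attribute,
         * (unsigned long)-1 when it cannot */
        unsigned long sz = __bos0(p);

        kfree(p);
        return sz;
    }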
54348 diff -urNp linux-2.6.32.41/include/linux/compiler.h linux-2.6.32.41/include/linux/compiler.h
54349 --- linux-2.6.32.41/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54350 +++ linux-2.6.32.41/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54351 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54352 #define __cold
54353 #endif
54354
54355 +#ifndef __alloc_size
54356 +#define __alloc_size
54357 +#endif
54358 +
54359 +#ifndef __bos
54360 +#define __bos
54361 +#endif
54362 +
54363 +#ifndef __bos0
54364 +#define __bos0
54365 +#endif
54366 +
54367 +#ifndef __bos1
54368 +#define __bos1
54369 +#endif
54370 +
54371 /* Simple shorthand for a section definition */
54372 #ifndef __section
54373 # define __section(S) __attribute__ ((__section__(#S)))
54374 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54375 * use is to mediate communication between process-level code and irq/NMI
54376 * handlers, all running on the same CPU.
54377 */
54378 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54379 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54380 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54381
54382 #endif /* __LINUX_COMPILER_H */
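Because ACCESS_ONCE now casts through a const-qualified pointer, code that used it on the left-hand side of an assignment no longer compiles; the patch introduces ACCESS_ONCE_RW for those write sites. A minimal sketch of the resulting idiom, with an illustrative flag variable:

    /* Sketch: reads keep ACCESS_ONCE, writes use the new ACCESS_ONCE_RW. */
    #include <linux/compiler.h>

    static int example_flag;

    static void example_producer(void)
    {
        ACCESS_ONCE_RW(example_flag) = 1;   /* would not compile via the const-ified ACCESS_ONCE */
    }

    static int example_consumer(void)
    {
        return ACCESS_ONCE(example_flag);   /* volatile, single read */
    }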
54383 diff -urNp linux-2.6.32.41/include/linux/dcache.h linux-2.6.32.41/include/linux/dcache.h
54384 --- linux-2.6.32.41/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54385 +++ linux-2.6.32.41/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54386 @@ -119,6 +119,8 @@ struct dentry {
54387 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54388 };
54389
54390 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54391 +
54392 /*
54393 * dentry->d_lock spinlock nesting subclasses:
54394 *
54395 diff -urNp linux-2.6.32.41/include/linux/decompress/mm.h linux-2.6.32.41/include/linux/decompress/mm.h
54396 --- linux-2.6.32.41/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54397 +++ linux-2.6.32.41/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54398 @@ -78,7 +78,7 @@ static void free(void *where)
54399 * warnings when not needed (indeed large_malloc / large_free are not
54400 * needed by inflate */
54401
54402 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54403 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54404 #define free(a) kfree(a)
54405
54406 #define large_malloc(a) vmalloc(a)
54407 diff -urNp linux-2.6.32.41/include/linux/dma-mapping.h linux-2.6.32.41/include/linux/dma-mapping.h
54408 --- linux-2.6.32.41/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54409 +++ linux-2.6.32.41/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54410 @@ -16,50 +16,50 @@ enum dma_data_direction {
54411 };
54412
54413 struct dma_map_ops {
54414 - void* (*alloc_coherent)(struct device *dev, size_t size,
54415 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54416 dma_addr_t *dma_handle, gfp_t gfp);
54417 - void (*free_coherent)(struct device *dev, size_t size,
54418 + void (* const free_coherent)(struct device *dev, size_t size,
54419 void *vaddr, dma_addr_t dma_handle);
54420 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54421 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54422 unsigned long offset, size_t size,
54423 enum dma_data_direction dir,
54424 struct dma_attrs *attrs);
54425 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54426 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54427 size_t size, enum dma_data_direction dir,
54428 struct dma_attrs *attrs);
54429 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54430 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54431 int nents, enum dma_data_direction dir,
54432 struct dma_attrs *attrs);
54433 - void (*unmap_sg)(struct device *dev,
54434 + void (* const unmap_sg)(struct device *dev,
54435 struct scatterlist *sg, int nents,
54436 enum dma_data_direction dir,
54437 struct dma_attrs *attrs);
54438 - void (*sync_single_for_cpu)(struct device *dev,
54439 + void (* const sync_single_for_cpu)(struct device *dev,
54440 dma_addr_t dma_handle, size_t size,
54441 enum dma_data_direction dir);
54442 - void (*sync_single_for_device)(struct device *dev,
54443 + void (* const sync_single_for_device)(struct device *dev,
54444 dma_addr_t dma_handle, size_t size,
54445 enum dma_data_direction dir);
54446 - void (*sync_single_range_for_cpu)(struct device *dev,
54447 + void (* const sync_single_range_for_cpu)(struct device *dev,
54448 dma_addr_t dma_handle,
54449 unsigned long offset,
54450 size_t size,
54451 enum dma_data_direction dir);
54452 - void (*sync_single_range_for_device)(struct device *dev,
54453 + void (* const sync_single_range_for_device)(struct device *dev,
54454 dma_addr_t dma_handle,
54455 unsigned long offset,
54456 size_t size,
54457 enum dma_data_direction dir);
54458 - void (*sync_sg_for_cpu)(struct device *dev,
54459 + void (* const sync_sg_for_cpu)(struct device *dev,
54460 struct scatterlist *sg, int nents,
54461 enum dma_data_direction dir);
54462 - void (*sync_sg_for_device)(struct device *dev,
54463 + void (* const sync_sg_for_device)(struct device *dev,
54464 struct scatterlist *sg, int nents,
54465 enum dma_data_direction dir);
54466 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54467 - int (*dma_supported)(struct device *dev, u64 mask);
54468 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54469 + int (* const dma_supported)(struct device *dev, u64 mask);
54470 int (*set_dma_mask)(struct device *dev, u64 mask);
54471 - int is_phys;
54472 + const int is_phys;
54473 };
54474
54475 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54476 diff -urNp linux-2.6.32.41/include/linux/dst.h linux-2.6.32.41/include/linux/dst.h
54477 --- linux-2.6.32.41/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54478 +++ linux-2.6.32.41/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54479 @@ -380,7 +380,7 @@ struct dst_node
54480 struct thread_pool *pool;
54481
54482 /* Transaction IDs live here */
54483 - atomic_long_t gen;
54484 + atomic_long_unchecked_t gen;
54485
54486 /*
54487 * How frequently and how many times transaction
54488 diff -urNp linux-2.6.32.41/include/linux/elf.h linux-2.6.32.41/include/linux/elf.h
54489 --- linux-2.6.32.41/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54490 +++ linux-2.6.32.41/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54491 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54492 #define PT_GNU_EH_FRAME 0x6474e550
54493
54494 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54495 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54496 +
54497 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54498 +
54499 +/* Constants for the e_flags field */
54500 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54501 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54502 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54503 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54504 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54505 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54506
54507 /* These constants define the different elf file types */
54508 #define ET_NONE 0
54509 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54510 #define DT_DEBUG 21
54511 #define DT_TEXTREL 22
54512 #define DT_JMPREL 23
54513 +#define DT_FLAGS 30
54514 + #define DF_TEXTREL 0x00000004
54515 #define DT_ENCODING 32
54516 #define OLD_DT_LOOS 0x60000000
54517 #define DT_LOOS 0x6000000d
54518 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54519 #define PF_W 0x2
54520 #define PF_X 0x1
54521
54522 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54523 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54524 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54525 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54526 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54527 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54528 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54529 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54530 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54531 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54532 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54533 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54534 +
54535 typedef struct elf32_phdr{
54536 Elf32_Word p_type;
54537 Elf32_Off p_offset;
54538 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54539 #define EI_OSABI 7
54540 #define EI_PAD 8
54541
54542 +#define EI_PAX 14
54543 +
54544 #define ELFMAG0 0x7f /* EI_MAG */
54545 #define ELFMAG1 'E'
54546 #define ELFMAG2 'L'
54547 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54548 #define elf_phdr elf32_phdr
54549 #define elf_note elf32_note
54550 #define elf_addr_t Elf32_Off
54551 +#define elf_dyn Elf32_Dyn
54552
54553 #else
54554
54555 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54556 #define elf_phdr elf64_phdr
54557 #define elf_note elf64_note
54558 #define elf_addr_t Elf64_Off
54559 +#define elf_dyn Elf64_Dyn
54560
54561 #endif
54562
54563 diff -urNp linux-2.6.32.41/include/linux/fscache-cache.h linux-2.6.32.41/include/linux/fscache-cache.h
54564 --- linux-2.6.32.41/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54565 +++ linux-2.6.32.41/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54566 @@ -116,7 +116,7 @@ struct fscache_operation {
54567 #endif
54568 };
54569
54570 -extern atomic_t fscache_op_debug_id;
54571 +extern atomic_unchecked_t fscache_op_debug_id;
54572 extern const struct slow_work_ops fscache_op_slow_work_ops;
54573
54574 extern void fscache_enqueue_operation(struct fscache_operation *);
54575 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54576 fscache_operation_release_t release)
54577 {
54578 atomic_set(&op->usage, 1);
54579 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54580 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54581 op->release = release;
54582 INIT_LIST_HEAD(&op->pend_link);
54583 fscache_set_op_state(op, "Init");
54584 diff -urNp linux-2.6.32.41/include/linux/fs.h linux-2.6.32.41/include/linux/fs.h
54585 --- linux-2.6.32.41/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54586 +++ linux-2.6.32.41/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54587 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54588 /* Expect random access pattern */
54589 #define FMODE_RANDOM ((__force fmode_t)4096)
54590
54591 +/* Hack for grsec so as not to require read permission simply to execute
54592 + * a binary
54593 + */
54594 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54595 +
54596 /*
54597 * The below are the various read and write types that we support. Some of
54598 * them include behavioral modifiers that send information down to the
54599 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54600 unsigned long, unsigned long);
54601
54602 struct address_space_operations {
54603 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54604 - int (*readpage)(struct file *, struct page *);
54605 - void (*sync_page)(struct page *);
54606 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54607 + int (* const readpage)(struct file *, struct page *);
54608 + void (* const sync_page)(struct page *);
54609
54610 /* Write back some dirty pages from this mapping. */
54611 - int (*writepages)(struct address_space *, struct writeback_control *);
54612 + int (* const writepages)(struct address_space *, struct writeback_control *);
54613
54614 /* Set a page dirty. Return true if this dirtied it */
54615 - int (*set_page_dirty)(struct page *page);
54616 + int (* const set_page_dirty)(struct page *page);
54617
54618 - int (*readpages)(struct file *filp, struct address_space *mapping,
54619 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54620 struct list_head *pages, unsigned nr_pages);
54621
54622 - int (*write_begin)(struct file *, struct address_space *mapping,
54623 + int (* const write_begin)(struct file *, struct address_space *mapping,
54624 loff_t pos, unsigned len, unsigned flags,
54625 struct page **pagep, void **fsdata);
54626 - int (*write_end)(struct file *, struct address_space *mapping,
54627 + int (* const write_end)(struct file *, struct address_space *mapping,
54628 loff_t pos, unsigned len, unsigned copied,
54629 struct page *page, void *fsdata);
54630
54631 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54632 - sector_t (*bmap)(struct address_space *, sector_t);
54633 - void (*invalidatepage) (struct page *, unsigned long);
54634 - int (*releasepage) (struct page *, gfp_t);
54635 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54636 + sector_t (* const bmap)(struct address_space *, sector_t);
54637 + void (* const invalidatepage) (struct page *, unsigned long);
54638 + int (* const releasepage) (struct page *, gfp_t);
54639 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54640 loff_t offset, unsigned long nr_segs);
54641 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54642 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54643 void **, unsigned long *);
54644 /* migrate the contents of a page to the specified target */
54645 - int (*migratepage) (struct address_space *,
54646 + int (* const migratepage) (struct address_space *,
54647 struct page *, struct page *);
54648 - int (*launder_page) (struct page *);
54649 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54650 + int (* const launder_page) (struct page *);
54651 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54652 unsigned long);
54653 - int (*error_remove_page)(struct address_space *, struct page *);
54654 + int (* const error_remove_page)(struct address_space *, struct page *);
54655 };
54656
54657 /*
54658 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54659 typedef struct files_struct *fl_owner_t;
54660
54661 struct file_lock_operations {
54662 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54663 - void (*fl_release_private)(struct file_lock *);
54664 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54665 + void (* const fl_release_private)(struct file_lock *);
54666 };
54667
54668 struct lock_manager_operations {
54669 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54670 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54671 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54672 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54673 - void (*fl_release_private)(struct file_lock *);
54674 - void (*fl_break)(struct file_lock *);
54675 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54676 - int (*fl_change)(struct file_lock **, int);
54677 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54678 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54679 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54680 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54681 + void (* const fl_release_private)(struct file_lock *);
54682 + void (* const fl_break)(struct file_lock *);
54683 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54684 + int (* const fl_change)(struct file_lock **, int);
54685 };
54686
54687 struct lock_manager {
54688 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54689 unsigned int fi_flags; /* Flags as passed from user */
54690 unsigned int fi_extents_mapped; /* Number of mapped extents */
54691 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54692 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54693 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54694 * array */
54695 };
54696 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54697 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54698 unsigned long, loff_t *);
54699
54700 struct super_operations {
54701 - struct inode *(*alloc_inode)(struct super_block *sb);
54702 - void (*destroy_inode)(struct inode *);
54703 + struct inode *(* const alloc_inode)(struct super_block *sb);
54704 + void (* const destroy_inode)(struct inode *);
54705
54706 - void (*dirty_inode) (struct inode *);
54707 - int (*write_inode) (struct inode *, int);
54708 - void (*drop_inode) (struct inode *);
54709 - void (*delete_inode) (struct inode *);
54710 - void (*put_super) (struct super_block *);
54711 - void (*write_super) (struct super_block *);
54712 - int (*sync_fs)(struct super_block *sb, int wait);
54713 - int (*freeze_fs) (struct super_block *);
54714 - int (*unfreeze_fs) (struct super_block *);
54715 - int (*statfs) (struct dentry *, struct kstatfs *);
54716 - int (*remount_fs) (struct super_block *, int *, char *);
54717 - void (*clear_inode) (struct inode *);
54718 - void (*umount_begin) (struct super_block *);
54719 + void (* const dirty_inode) (struct inode *);
54720 + int (* const write_inode) (struct inode *, int);
54721 + void (* const drop_inode) (struct inode *);
54722 + void (* const delete_inode) (struct inode *);
54723 + void (* const put_super) (struct super_block *);
54724 + void (* const write_super) (struct super_block *);
54725 + int (* const sync_fs)(struct super_block *sb, int wait);
54726 + int (* const freeze_fs) (struct super_block *);
54727 + int (* const unfreeze_fs) (struct super_block *);
54728 + int (* const statfs) (struct dentry *, struct kstatfs *);
54729 + int (* const remount_fs) (struct super_block *, int *, char *);
54730 + void (* const clear_inode) (struct inode *);
54731 + void (* const umount_begin) (struct super_block *);
54732
54733 - int (*show_options)(struct seq_file *, struct vfsmount *);
54734 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54735 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54736 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54737 #ifdef CONFIG_QUOTA
54738 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54739 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54740 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54741 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54742 #endif
54743 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54744 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54745 };
54746
54747 /*
54748 diff -urNp linux-2.6.32.41/include/linux/fs_struct.h linux-2.6.32.41/include/linux/fs_struct.h
54749 --- linux-2.6.32.41/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54750 +++ linux-2.6.32.41/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54751 @@ -4,7 +4,7 @@
54752 #include <linux/path.h>
54753
54754 struct fs_struct {
54755 - int users;
54756 + atomic_t users;
54757 rwlock_t lock;
54758 int umask;
54759 int in_exec;
54760 diff -urNp linux-2.6.32.41/include/linux/ftrace_event.h linux-2.6.32.41/include/linux/ftrace_event.h
54761 --- linux-2.6.32.41/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54762 +++ linux-2.6.32.41/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54763 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54764 int filter_type);
54765 extern int trace_define_common_fields(struct ftrace_event_call *call);
54766
54767 -#define is_signed_type(type) (((type)(-1)) < 0)
54768 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54769
54770 int trace_set_clr_event(const char *system, const char *event, int set);
54771
54772 diff -urNp linux-2.6.32.41/include/linux/genhd.h linux-2.6.32.41/include/linux/genhd.h
54773 --- linux-2.6.32.41/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54774 +++ linux-2.6.32.41/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54775 @@ -161,7 +161,7 @@ struct gendisk {
54776
54777 struct timer_rand_state *random;
54778
54779 - atomic_t sync_io; /* RAID */
54780 + atomic_unchecked_t sync_io; /* RAID */
54781 struct work_struct async_notify;
54782 #ifdef CONFIG_BLK_DEV_INTEGRITY
54783 struct blk_integrity *integrity;
54784 diff -urNp linux-2.6.32.41/include/linux/gracl.h linux-2.6.32.41/include/linux/gracl.h
54785 --- linux-2.6.32.41/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54786 +++ linux-2.6.32.41/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54787 @@ -0,0 +1,317 @@
54788 +#ifndef GR_ACL_H
54789 +#define GR_ACL_H
54790 +
54791 +#include <linux/grdefs.h>
54792 +#include <linux/resource.h>
54793 +#include <linux/capability.h>
54794 +#include <linux/dcache.h>
54795 +#include <asm/resource.h>
54796 +
54797 +/* Major status information */
54798 +
54799 +#define GR_VERSION "grsecurity 2.2.2"
54800 +#define GRSECURITY_VERSION 0x2202
54801 +
54802 +enum {
54803 + GR_SHUTDOWN = 0,
54804 + GR_ENABLE = 1,
54805 + GR_SPROLE = 2,
54806 + GR_RELOAD = 3,
54807 + GR_SEGVMOD = 4,
54808 + GR_STATUS = 5,
54809 + GR_UNSPROLE = 6,
54810 + GR_PASSSET = 7,
54811 + GR_SPROLEPAM = 8,
54812 +};
54813 +
54814 +/* Password setup definitions
54815 + * kernel/grhash.c */
54816 +enum {
54817 + GR_PW_LEN = 128,
54818 + GR_SALT_LEN = 16,
54819 + GR_SHA_LEN = 32,
54820 +};
54821 +
54822 +enum {
54823 + GR_SPROLE_LEN = 64,
54824 +};
54825 +
54826 +enum {
54827 + GR_NO_GLOB = 0,
54828 + GR_REG_GLOB,
54829 + GR_CREATE_GLOB
54830 +};
54831 +
54832 +#define GR_NLIMITS 32
54833 +
54834 +/* Begin Data Structures */
54835 +
54836 +struct sprole_pw {
54837 + unsigned char *rolename;
54838 + unsigned char salt[GR_SALT_LEN];
54839 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54840 +};
54841 +
54842 +struct name_entry {
54843 + __u32 key;
54844 + ino_t inode;
54845 + dev_t device;
54846 + char *name;
54847 + __u16 len;
54848 + __u8 deleted;
54849 + struct name_entry *prev;
54850 + struct name_entry *next;
54851 +};
54852 +
54853 +struct inodev_entry {
54854 + struct name_entry *nentry;
54855 + struct inodev_entry *prev;
54856 + struct inodev_entry *next;
54857 +};
54858 +
54859 +struct acl_role_db {
54860 + struct acl_role_label **r_hash;
54861 + __u32 r_size;
54862 +};
54863 +
54864 +struct inodev_db {
54865 + struct inodev_entry **i_hash;
54866 + __u32 i_size;
54867 +};
54868 +
54869 +struct name_db {
54870 + struct name_entry **n_hash;
54871 + __u32 n_size;
54872 +};
54873 +
54874 +struct crash_uid {
54875 + uid_t uid;
54876 + unsigned long expires;
54877 +};
54878 +
54879 +struct gr_hash_struct {
54880 + void **table;
54881 + void **nametable;
54882 + void *first;
54883 + __u32 table_size;
54884 + __u32 used_size;
54885 + int type;
54886 +};
54887 +
54888 +/* Userspace Grsecurity ACL data structures */
54889 +
54890 +struct acl_subject_label {
54891 + char *filename;
54892 + ino_t inode;
54893 + dev_t device;
54894 + __u32 mode;
54895 + kernel_cap_t cap_mask;
54896 + kernel_cap_t cap_lower;
54897 + kernel_cap_t cap_invert_audit;
54898 +
54899 + struct rlimit res[GR_NLIMITS];
54900 + __u32 resmask;
54901 +
54902 + __u8 user_trans_type;
54903 + __u8 group_trans_type;
54904 + uid_t *user_transitions;
54905 + gid_t *group_transitions;
54906 + __u16 user_trans_num;
54907 + __u16 group_trans_num;
54908 +
54909 + __u32 sock_families[2];
54910 + __u32 ip_proto[8];
54911 + __u32 ip_type;
54912 + struct acl_ip_label **ips;
54913 + __u32 ip_num;
54914 + __u32 inaddr_any_override;
54915 +
54916 + __u32 crashes;
54917 + unsigned long expires;
54918 +
54919 + struct acl_subject_label *parent_subject;
54920 + struct gr_hash_struct *hash;
54921 + struct acl_subject_label *prev;
54922 + struct acl_subject_label *next;
54923 +
54924 + struct acl_object_label **obj_hash;
54925 + __u32 obj_hash_size;
54926 + __u16 pax_flags;
54927 +};
54928 +
54929 +struct role_allowed_ip {
54930 + __u32 addr;
54931 + __u32 netmask;
54932 +
54933 + struct role_allowed_ip *prev;
54934 + struct role_allowed_ip *next;
54935 +};
54936 +
54937 +struct role_transition {
54938 + char *rolename;
54939 +
54940 + struct role_transition *prev;
54941 + struct role_transition *next;
54942 +};
54943 +
54944 +struct acl_role_label {
54945 + char *rolename;
54946 + uid_t uidgid;
54947 + __u16 roletype;
54948 +
54949 + __u16 auth_attempts;
54950 + unsigned long expires;
54951 +
54952 + struct acl_subject_label *root_label;
54953 + struct gr_hash_struct *hash;
54954 +
54955 + struct acl_role_label *prev;
54956 + struct acl_role_label *next;
54957 +
54958 + struct role_transition *transitions;
54959 + struct role_allowed_ip *allowed_ips;
54960 + uid_t *domain_children;
54961 + __u16 domain_child_num;
54962 +
54963 + struct acl_subject_label **subj_hash;
54964 + __u32 subj_hash_size;
54965 +};
54966 +
54967 +struct user_acl_role_db {
54968 + struct acl_role_label **r_table;
54969 + __u32 num_pointers; /* Number of allocations to track */
54970 + __u32 num_roles; /* Number of roles */
54971 + __u32 num_domain_children; /* Number of domain children */
54972 + __u32 num_subjects; /* Number of subjects */
54973 + __u32 num_objects; /* Number of objects */
54974 +};
54975 +
54976 +struct acl_object_label {
54977 + char *filename;
54978 + ino_t inode;
54979 + dev_t device;
54980 + __u32 mode;
54981 +
54982 + struct acl_subject_label *nested;
54983 + struct acl_object_label *globbed;
54984 +
54985 + /* next two structures not used */
54986 +
54987 + struct acl_object_label *prev;
54988 + struct acl_object_label *next;
54989 +};
54990 +
54991 +struct acl_ip_label {
54992 + char *iface;
54993 + __u32 addr;
54994 + __u32 netmask;
54995 + __u16 low, high;
54996 + __u8 mode;
54997 + __u32 type;
54998 + __u32 proto[8];
54999 +
55000 + /* next two structures not used */
55001 +
55002 + struct acl_ip_label *prev;
55003 + struct acl_ip_label *next;
55004 +};
55005 +
55006 +struct gr_arg {
55007 + struct user_acl_role_db role_db;
55008 + unsigned char pw[GR_PW_LEN];
55009 + unsigned char salt[GR_SALT_LEN];
55010 + unsigned char sum[GR_SHA_LEN];
55011 + unsigned char sp_role[GR_SPROLE_LEN];
55012 + struct sprole_pw *sprole_pws;
55013 + dev_t segv_device;
55014 + ino_t segv_inode;
55015 + uid_t segv_uid;
55016 + __u16 num_sprole_pws;
55017 + __u16 mode;
55018 +};
55019 +
55020 +struct gr_arg_wrapper {
55021 + struct gr_arg *arg;
55022 + __u32 version;
55023 + __u32 size;
55024 +};
55025 +
55026 +struct subject_map {
55027 + struct acl_subject_label *user;
55028 + struct acl_subject_label *kernel;
55029 + struct subject_map *prev;
55030 + struct subject_map *next;
55031 +};
55032 +
55033 +struct acl_subj_map_db {
55034 + struct subject_map **s_hash;
55035 + __u32 s_size;
55036 +};
55037 +
55038 +/* End Data Structures Section */
55039 +
55040 +/* Hash functions generated by empirical testing by Brad Spengler
55041 + Makes good use of the low bits of the inode. Generally 0-1 times
55042 + in loop for successful match. 0-3 for unsuccessful match.
55043 + Shift/add algorithm with modulus of table size and an XOR*/
55044 +
55045 +static __inline__ unsigned int
55046 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55047 +{
55048 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55049 +}
55050 +
55051 + static __inline__ unsigned int
55052 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55053 +{
55054 + return ((const unsigned long)userp % sz);
55055 +}
55056 +
55057 +static __inline__ unsigned int
55058 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55059 +{
55060 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55061 +}
55062 +
55063 +static __inline__ unsigned int
55064 +nhash(const char *name, const __u16 len, const unsigned int sz)
55065 +{
55066 + return full_name_hash((const unsigned char *)name, len) % sz;
55067 +}
55068 +
55069 +#define FOR_EACH_ROLE_START(role) \
55070 + role = role_list; \
55071 + while (role) {
55072 +
55073 +#define FOR_EACH_ROLE_END(role) \
55074 + role = role->prev; \
55075 + }
55076 +
55077 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55078 + subj = NULL; \
55079 + iter = 0; \
55080 + while (iter < role->subj_hash_size) { \
55081 + if (subj == NULL) \
55082 + subj = role->subj_hash[iter]; \
55083 + if (subj == NULL) { \
55084 + iter++; \
55085 + continue; \
55086 + }
55087 +
55088 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55089 + subj = subj->next; \
55090 + if (subj == NULL) \
55091 + iter++; \
55092 + }
55093 +
55094 +
55095 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55096 + subj = role->hash->first; \
55097 + while (subj != NULL) {
55098 +
55099 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55100 + subj = subj->next; \
55101 + }
55102 +
55103 +#endif
55104 +
55105 diff -urNp linux-2.6.32.41/include/linux/gralloc.h linux-2.6.32.41/include/linux/gralloc.h
55106 --- linux-2.6.32.41/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55107 +++ linux-2.6.32.41/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55108 @@ -0,0 +1,9 @@
55109 +#ifndef __GRALLOC_H
55110 +#define __GRALLOC_H
55111 +
55112 +void acl_free_all(void);
55113 +int acl_alloc_stack_init(unsigned long size);
55114 +void *acl_alloc(unsigned long len);
55115 +void *acl_alloc_num(unsigned long num, unsigned long len);
55116 +
55117 +#endif
55118 diff -urNp linux-2.6.32.41/include/linux/grdefs.h linux-2.6.32.41/include/linux/grdefs.h
55119 --- linux-2.6.32.41/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55120 +++ linux-2.6.32.41/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55121 @@ -0,0 +1,140 @@
55122 +#ifndef GRDEFS_H
55123 +#define GRDEFS_H
55124 +
55125 +/* Begin grsecurity status declarations */
55126 +
55127 +enum {
55128 + GR_READY = 0x01,
55129 + GR_STATUS_INIT = 0x00 // disabled state
55130 +};
55131 +
55132 +/* Begin ACL declarations */
55133 +
55134 +/* Role flags */
55135 +
55136 +enum {
55137 + GR_ROLE_USER = 0x0001,
55138 + GR_ROLE_GROUP = 0x0002,
55139 + GR_ROLE_DEFAULT = 0x0004,
55140 + GR_ROLE_SPECIAL = 0x0008,
55141 + GR_ROLE_AUTH = 0x0010,
55142 + GR_ROLE_NOPW = 0x0020,
55143 + GR_ROLE_GOD = 0x0040,
55144 + GR_ROLE_LEARN = 0x0080,
55145 + GR_ROLE_TPE = 0x0100,
55146 + GR_ROLE_DOMAIN = 0x0200,
55147 + GR_ROLE_PAM = 0x0400,
55148 + GR_ROLE_PERSIST = 0x800
55149 +};
55150 +
55151 +/* ACL Subject and Object mode flags */
55152 +enum {
55153 + GR_DELETED = 0x80000000
55154 +};
55155 +
55156 +/* ACL Object-only mode flags */
55157 +enum {
55158 + GR_READ = 0x00000001,
55159 + GR_APPEND = 0x00000002,
55160 + GR_WRITE = 0x00000004,
55161 + GR_EXEC = 0x00000008,
55162 + GR_FIND = 0x00000010,
55163 + GR_INHERIT = 0x00000020,
55164 + GR_SETID = 0x00000040,
55165 + GR_CREATE = 0x00000080,
55166 + GR_DELETE = 0x00000100,
55167 + GR_LINK = 0x00000200,
55168 + GR_AUDIT_READ = 0x00000400,
55169 + GR_AUDIT_APPEND = 0x00000800,
55170 + GR_AUDIT_WRITE = 0x00001000,
55171 + GR_AUDIT_EXEC = 0x00002000,
55172 + GR_AUDIT_FIND = 0x00004000,
55173 + GR_AUDIT_INHERIT= 0x00008000,
55174 + GR_AUDIT_SETID = 0x00010000,
55175 + GR_AUDIT_CREATE = 0x00020000,
55176 + GR_AUDIT_DELETE = 0x00040000,
55177 + GR_AUDIT_LINK = 0x00080000,
55178 + GR_PTRACERD = 0x00100000,
55179 + GR_NOPTRACE = 0x00200000,
55180 + GR_SUPPRESS = 0x00400000,
55181 + GR_NOLEARN = 0x00800000,
55182 + GR_INIT_TRANSFER= 0x01000000
55183 +};
55184 +
55185 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55186 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55187 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55188 +
55189 +/* ACL subject-only mode flags */
55190 +enum {
55191 + GR_KILL = 0x00000001,
55192 + GR_VIEW = 0x00000002,
55193 + GR_PROTECTED = 0x00000004,
55194 + GR_LEARN = 0x00000008,
55195 + GR_OVERRIDE = 0x00000010,
55196 + /* just a placeholder, this mode is only used in userspace */
55197 + GR_DUMMY = 0x00000020,
55198 + GR_PROTSHM = 0x00000040,
55199 + GR_KILLPROC = 0x00000080,
55200 + GR_KILLIPPROC = 0x00000100,
55201 + /* just a placeholder, this mode is only used in userspace */
55202 + GR_NOTROJAN = 0x00000200,
55203 + GR_PROTPROCFD = 0x00000400,
55204 + GR_PROCACCT = 0x00000800,
55205 + GR_RELAXPTRACE = 0x00001000,
55206 + GR_NESTED = 0x00002000,
55207 + GR_INHERITLEARN = 0x00004000,
55208 + GR_PROCFIND = 0x00008000,
55209 + GR_POVERRIDE = 0x00010000,
55210 + GR_KERNELAUTH = 0x00020000,
55211 + GR_ATSECURE = 0x00040000,
55212 + GR_SHMEXEC = 0x00080000
55213 +};
55214 +
55215 +enum {
55216 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55217 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55218 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55219 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55220 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55221 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55222 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55223 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55224 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55225 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55226 +};
55227 +
55228 +enum {
55229 + GR_ID_USER = 0x01,
55230 + GR_ID_GROUP = 0x02,
55231 +};
55232 +
55233 +enum {
55234 + GR_ID_ALLOW = 0x01,
55235 + GR_ID_DENY = 0x02,
55236 +};
55237 +
55238 +#define GR_CRASH_RES 31
55239 +#define GR_UIDTABLE_MAX 500
55240 +
55241 +/* begin resource learning section */
55242 +enum {
55243 + GR_RLIM_CPU_BUMP = 60,
55244 + GR_RLIM_FSIZE_BUMP = 50000,
55245 + GR_RLIM_DATA_BUMP = 10000,
55246 + GR_RLIM_STACK_BUMP = 1000,
55247 + GR_RLIM_CORE_BUMP = 10000,
55248 + GR_RLIM_RSS_BUMP = 500000,
55249 + GR_RLIM_NPROC_BUMP = 1,
55250 + GR_RLIM_NOFILE_BUMP = 5,
55251 + GR_RLIM_MEMLOCK_BUMP = 50000,
55252 + GR_RLIM_AS_BUMP = 500000,
55253 + GR_RLIM_LOCKS_BUMP = 2,
55254 + GR_RLIM_SIGPENDING_BUMP = 5,
55255 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55256 + GR_RLIM_NICE_BUMP = 1,
55257 + GR_RLIM_RTPRIO_BUMP = 1,
55258 + GR_RLIM_RTTIME_BUMP = 1000000
55259 +};
55260 +
55261 +#endif
55262 diff -urNp linux-2.6.32.41/include/linux/grinternal.h linux-2.6.32.41/include/linux/grinternal.h
55263 --- linux-2.6.32.41/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55264 +++ linux-2.6.32.41/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55265 @@ -0,0 +1,218 @@
55266 +#ifndef __GRINTERNAL_H
55267 +#define __GRINTERNAL_H
55268 +
55269 +#ifdef CONFIG_GRKERNSEC
55270 +
55271 +#include <linux/fs.h>
55272 +#include <linux/mnt_namespace.h>
55273 +#include <linux/nsproxy.h>
55274 +#include <linux/gracl.h>
55275 +#include <linux/grdefs.h>
55276 +#include <linux/grmsg.h>
55277 +
55278 +void gr_add_learn_entry(const char *fmt, ...)
55279 + __attribute__ ((format (printf, 1, 2)));
55280 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55281 + const struct vfsmount *mnt);
55282 +__u32 gr_check_create(const struct dentry *new_dentry,
55283 + const struct dentry *parent,
55284 + const struct vfsmount *mnt, const __u32 mode);
55285 +int gr_check_protected_task(const struct task_struct *task);
55286 +__u32 to_gr_audit(const __u32 reqmode);
55287 +int gr_set_acls(const int type);
55288 +int gr_apply_subject_to_task(struct task_struct *task);
55289 +int gr_acl_is_enabled(void);
55290 +char gr_roletype_to_char(void);
55291 +
55292 +void gr_handle_alertkill(struct task_struct *task);
55293 +char *gr_to_filename(const struct dentry *dentry,
55294 + const struct vfsmount *mnt);
55295 +char *gr_to_filename1(const struct dentry *dentry,
55296 + const struct vfsmount *mnt);
55297 +char *gr_to_filename2(const struct dentry *dentry,
55298 + const struct vfsmount *mnt);
55299 +char *gr_to_filename3(const struct dentry *dentry,
55300 + const struct vfsmount *mnt);
55301 +
55302 +extern int grsec_enable_harden_ptrace;
55303 +extern int grsec_enable_link;
55304 +extern int grsec_enable_fifo;
55305 +extern int grsec_enable_execve;
55306 +extern int grsec_enable_shm;
55307 +extern int grsec_enable_execlog;
55308 +extern int grsec_enable_signal;
55309 +extern int grsec_enable_audit_ptrace;
55310 +extern int grsec_enable_forkfail;
55311 +extern int grsec_enable_time;
55312 +extern int grsec_enable_rofs;
55313 +extern int grsec_enable_chroot_shmat;
55314 +extern int grsec_enable_chroot_findtask;
55315 +extern int grsec_enable_chroot_mount;
55316 +extern int grsec_enable_chroot_double;
55317 +extern int grsec_enable_chroot_pivot;
55318 +extern int grsec_enable_chroot_chdir;
55319 +extern int grsec_enable_chroot_chmod;
55320 +extern int grsec_enable_chroot_mknod;
55321 +extern int grsec_enable_chroot_fchdir;
55322 +extern int grsec_enable_chroot_nice;
55323 +extern int grsec_enable_chroot_execlog;
55324 +extern int grsec_enable_chroot_caps;
55325 +extern int grsec_enable_chroot_sysctl;
55326 +extern int grsec_enable_chroot_unix;
55327 +extern int grsec_enable_tpe;
55328 +extern int grsec_tpe_gid;
55329 +extern int grsec_enable_tpe_all;
55330 +extern int grsec_enable_tpe_invert;
55331 +extern int grsec_enable_socket_all;
55332 +extern int grsec_socket_all_gid;
55333 +extern int grsec_enable_socket_client;
55334 +extern int grsec_socket_client_gid;
55335 +extern int grsec_enable_socket_server;
55336 +extern int grsec_socket_server_gid;
55337 +extern int grsec_audit_gid;
55338 +extern int grsec_enable_group;
55339 +extern int grsec_enable_audit_textrel;
55340 +extern int grsec_enable_log_rwxmaps;
55341 +extern int grsec_enable_mount;
55342 +extern int grsec_enable_chdir;
55343 +extern int grsec_resource_logging;
55344 +extern int grsec_enable_blackhole;
55345 +extern int grsec_lastack_retries;
55346 +extern int grsec_lock;
55347 +
55348 +extern spinlock_t grsec_alert_lock;
55349 +extern unsigned long grsec_alert_wtime;
55350 +extern unsigned long grsec_alert_fyet;
55351 +
55352 +extern spinlock_t grsec_audit_lock;
55353 +
55354 +extern rwlock_t grsec_exec_file_lock;
55355 +
55356 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55357 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55358 + (tsk)->exec_file->f_vfsmnt) : "/")
55359 +
55360 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55361 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55362 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55363 +
55364 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55365 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55366 + (tsk)->exec_file->f_vfsmnt) : "/")
55367 +
55368 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55369 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55370 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55371 +
55372 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55373 +
55374 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55375 +
55376 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55377 + (task)->pid, (cred)->uid, \
55378 + (cred)->euid, (cred)->gid, (cred)->egid, \
55379 + gr_parent_task_fullpath(task), \
55380 + (task)->real_parent->comm, (task)->real_parent->pid, \
55381 + (pcred)->uid, (pcred)->euid, \
55382 + (pcred)->gid, (pcred)->egid
55383 +
55384 +#define GR_CHROOT_CAPS {{ \
55385 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55386 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55387 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55388 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55389 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55390 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55391 +
55392 +#define security_learn(normal_msg,args...) \
55393 +({ \
55394 + read_lock(&grsec_exec_file_lock); \
55395 + gr_add_learn_entry(normal_msg "\n", ## args); \
55396 + read_unlock(&grsec_exec_file_lock); \
55397 +})
55398 +
55399 +enum {
55400 + GR_DO_AUDIT,
55401 + GR_DONT_AUDIT,
55402 + GR_DONT_AUDIT_GOOD
55403 +};
55404 +
55405 +enum {
55406 + GR_TTYSNIFF,
55407 + GR_RBAC,
55408 + GR_RBAC_STR,
55409 + GR_STR_RBAC,
55410 + GR_RBAC_MODE2,
55411 + GR_RBAC_MODE3,
55412 + GR_FILENAME,
55413 + GR_SYSCTL_HIDDEN,
55414 + GR_NOARGS,
55415 + GR_ONE_INT,
55416 + GR_ONE_INT_TWO_STR,
55417 + GR_ONE_STR,
55418 + GR_STR_INT,
55419 + GR_TWO_STR_INT,
55420 + GR_TWO_INT,
55421 + GR_TWO_U64,
55422 + GR_THREE_INT,
55423 + GR_FIVE_INT_TWO_STR,
55424 + GR_TWO_STR,
55425 + GR_THREE_STR,
55426 + GR_FOUR_STR,
55427 + GR_STR_FILENAME,
55428 + GR_FILENAME_STR,
55429 + GR_FILENAME_TWO_INT,
55430 + GR_FILENAME_TWO_INT_STR,
55431 + GR_TEXTREL,
55432 + GR_PTRACE,
55433 + GR_RESOURCE,
55434 + GR_CAP,
55435 + GR_SIG,
55436 + GR_SIG2,
55437 + GR_CRASH1,
55438 + GR_CRASH2,
55439 + GR_PSACCT,
55440 + GR_RWXMAP
55441 +};
55442 +
55443 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55444 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55445 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55446 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55447 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55448 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55449 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55450 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55451 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55452 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55453 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55454 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55455 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55456 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55457 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55458 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55459 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55460 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55461 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55462 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55463 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55464 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55465 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55466 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55467 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55468 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55469 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55470 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55471 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55472 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55473 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55474 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55475 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55476 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55477 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55478 +
55479 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55480 +
55481 +#endif
55482 +
55483 +#endif
55484 diff -urNp linux-2.6.32.41/include/linux/grmsg.h linux-2.6.32.41/include/linux/grmsg.h
55485 --- linux-2.6.32.41/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55486 +++ linux-2.6.32.41/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55487 @@ -0,0 +1,108 @@
55488 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55489 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55490 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55491 +#define GR_STOPMOD_MSG "denied modification of module state by "
55492 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55493 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55494 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55495 +#define GR_IOPL_MSG "denied use of iopl() by "
55496 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55497 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55498 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55499 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55500 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55501 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55502 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55503 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55504 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55505 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55506 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55507 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55508 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55509 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55510 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55511 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55512 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55513 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55514 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55515 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55516 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55517 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55518 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55519 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55520 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55521 +#define GR_NPROC_MSG "denied overstep of process limit by "
55522 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55523 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55524 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55525 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55526 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55527 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55528 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55529 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55530 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55531 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55532 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55533 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55534 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55535 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55536 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55537 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55538 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55539 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55540 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55541 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55542 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55543 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55544 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55545 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55546 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55547 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55548 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55549 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55550 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55551 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55552 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55553 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55554 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55555 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55556 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55557 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55558 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55559 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55560 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55561 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55562 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55563 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55564 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55565 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55566 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55567 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55568 +#define GR_TIME_MSG "time set by "
55569 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55570 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55571 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55572 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55573 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55574 +#define GR_BIND_MSG "denied bind() by "
55575 +#define GR_CONNECT_MSG "denied connect() by "
55576 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55577 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55578 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55579 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55580 +#define GR_CAP_ACL_MSG "use of %s denied for "
55581 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55582 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55583 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55584 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55585 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55586 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55587 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55588 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55589 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55590 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55591 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55592 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55593 +#define GR_VM86_MSG "denied use of vm86 by "
55594 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55595 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55596 diff -urNp linux-2.6.32.41/include/linux/grsecurity.h linux-2.6.32.41/include/linux/grsecurity.h
55597 --- linux-2.6.32.41/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55598 +++ linux-2.6.32.41/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55599 @@ -0,0 +1,212 @@
55600 +#ifndef GR_SECURITY_H
55601 +#define GR_SECURITY_H
55602 +#include <linux/fs.h>
55603 +#include <linux/fs_struct.h>
55604 +#include <linux/binfmts.h>
55605 +#include <linux/gracl.h>
55606 +#include <linux/compat.h>
55607 +
55608 +/* notify of brain-dead configs */
55609 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55610 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55611 +#endif
55612 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55613 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55614 +#endif
55615 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55616 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55617 +#endif
55618 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55619 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55620 +#endif
55621 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55622 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55623 +#endif
55624 +
55625 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55626 +void gr_handle_brute_check(void);
55627 +void gr_handle_kernel_exploit(void);
55628 +int gr_process_user_ban(void);
55629 +
55630 +char gr_roletype_to_char(void);
55631 +
55632 +int gr_acl_enable_at_secure(void);
55633 +
55634 +int gr_check_user_change(int real, int effective, int fs);
55635 +int gr_check_group_change(int real, int effective, int fs);
55636 +
55637 +void gr_del_task_from_ip_table(struct task_struct *p);
55638 +
55639 +int gr_pid_is_chrooted(struct task_struct *p);
55640 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55641 +int gr_handle_chroot_nice(void);
55642 +int gr_handle_chroot_sysctl(const int op);
55643 +int gr_handle_chroot_setpriority(struct task_struct *p,
55644 + const int niceval);
55645 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55646 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55647 + const struct vfsmount *mnt);
55648 +int gr_handle_chroot_caps(struct path *path);
55649 +void gr_handle_chroot_chdir(struct path *path);
55650 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55651 + const struct vfsmount *mnt, const int mode);
55652 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55653 + const struct vfsmount *mnt, const int mode);
55654 +int gr_handle_chroot_mount(const struct dentry *dentry,
55655 + const struct vfsmount *mnt,
55656 + const char *dev_name);
55657 +int gr_handle_chroot_pivot(void);
55658 +int gr_handle_chroot_unix(const pid_t pid);
55659 +
55660 +int gr_handle_rawio(const struct inode *inode);
55661 +int gr_handle_nproc(void);
55662 +
55663 +void gr_handle_ioperm(void);
55664 +void gr_handle_iopl(void);
55665 +
55666 +int gr_tpe_allow(const struct file *file);
55667 +
55668 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55669 +void gr_clear_chroot_entries(struct task_struct *task);
55670 +
55671 +void gr_log_forkfail(const int retval);
55672 +void gr_log_timechange(void);
55673 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55674 +void gr_log_chdir(const struct dentry *dentry,
55675 + const struct vfsmount *mnt);
55676 +void gr_log_chroot_exec(const struct dentry *dentry,
55677 + const struct vfsmount *mnt);
55678 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55679 +#ifdef CONFIG_COMPAT
55680 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55681 +#endif
55682 +void gr_log_remount(const char *devname, const int retval);
55683 +void gr_log_unmount(const char *devname, const int retval);
55684 +void gr_log_mount(const char *from, const char *to, const int retval);
55685 +void gr_log_textrel(struct vm_area_struct *vma);
55686 +void gr_log_rwxmmap(struct file *file);
55687 +void gr_log_rwxmprotect(struct file *file);
55688 +
55689 +int gr_handle_follow_link(const struct inode *parent,
55690 + const struct inode *inode,
55691 + const struct dentry *dentry,
55692 + const struct vfsmount *mnt);
55693 +int gr_handle_fifo(const struct dentry *dentry,
55694 + const struct vfsmount *mnt,
55695 + const struct dentry *dir, const int flag,
55696 + const int acc_mode);
55697 +int gr_handle_hardlink(const struct dentry *dentry,
55698 + const struct vfsmount *mnt,
55699 + struct inode *inode,
55700 + const int mode, const char *to);
55701 +
55702 +int gr_is_capable(const int cap);
55703 +int gr_is_capable_nolog(const int cap);
55704 +void gr_learn_resource(const struct task_struct *task, const int limit,
55705 + const unsigned long wanted, const int gt);
55706 +void gr_copy_label(struct task_struct *tsk);
55707 +void gr_handle_crash(struct task_struct *task, const int sig);
55708 +int gr_handle_signal(const struct task_struct *p, const int sig);
55709 +int gr_check_crash_uid(const uid_t uid);
55710 +int gr_check_protected_task(const struct task_struct *task);
55711 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55712 +int gr_acl_handle_mmap(const struct file *file,
55713 + const unsigned long prot);
55714 +int gr_acl_handle_mprotect(const struct file *file,
55715 + const unsigned long prot);
55716 +int gr_check_hidden_task(const struct task_struct *tsk);
55717 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55718 + const struct vfsmount *mnt);
55719 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55720 + const struct vfsmount *mnt);
55721 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55722 + const struct vfsmount *mnt, const int fmode);
55723 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55724 + const struct vfsmount *mnt, mode_t mode);
55725 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55726 + const struct vfsmount *mnt, mode_t mode);
55727 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55728 + const struct vfsmount *mnt);
55729 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55730 + const struct vfsmount *mnt);
55731 +int gr_handle_ptrace(struct task_struct *task, const long request);
55732 +int gr_handle_proc_ptrace(struct task_struct *task);
55733 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55734 + const struct vfsmount *mnt);
55735 +int gr_check_crash_exec(const struct file *filp);
55736 +int gr_acl_is_enabled(void);
55737 +void gr_set_kernel_label(struct task_struct *task);
55738 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55739 + const gid_t gid);
55740 +int gr_set_proc_label(const struct dentry *dentry,
55741 + const struct vfsmount *mnt,
55742 + const int unsafe_share);
55743 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55744 + const struct vfsmount *mnt);
55745 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55746 + const struct vfsmount *mnt, const int fmode);
55747 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55748 + const struct dentry *p_dentry,
55749 + const struct vfsmount *p_mnt, const int fmode,
55750 + const int imode);
55751 +void gr_handle_create(const struct dentry *dentry,
55752 + const struct vfsmount *mnt);
55753 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55754 + const struct dentry *parent_dentry,
55755 + const struct vfsmount *parent_mnt,
55756 + const int mode);
55757 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55758 + const struct dentry *parent_dentry,
55759 + const struct vfsmount *parent_mnt);
55760 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55761 + const struct vfsmount *mnt);
55762 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55763 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55764 + const struct vfsmount *mnt);
55765 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55766 + const struct dentry *parent_dentry,
55767 + const struct vfsmount *parent_mnt,
55768 + const char *from);
55769 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55770 + const struct dentry *parent_dentry,
55771 + const struct vfsmount *parent_mnt,
55772 + const struct dentry *old_dentry,
55773 + const struct vfsmount *old_mnt, const char *to);
55774 +int gr_acl_handle_rename(struct dentry *new_dentry,
55775 + struct dentry *parent_dentry,
55776 + const struct vfsmount *parent_mnt,
55777 + struct dentry *old_dentry,
55778 + struct inode *old_parent_inode,
55779 + struct vfsmount *old_mnt, const char *newname);
55780 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55781 + struct dentry *old_dentry,
55782 + struct dentry *new_dentry,
55783 + struct vfsmount *mnt, const __u8 replace);
55784 +__u32 gr_check_link(const struct dentry *new_dentry,
55785 + const struct dentry *parent_dentry,
55786 + const struct vfsmount *parent_mnt,
55787 + const struct dentry *old_dentry,
55788 + const struct vfsmount *old_mnt);
55789 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55790 + const unsigned int namelen, const ino_t ino);
55791 +
55792 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55793 + const struct vfsmount *mnt);
55794 +void gr_acl_handle_exit(void);
55795 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55796 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55797 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55798 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55799 +void gr_audit_ptrace(struct task_struct *task);
55800 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55801 +
55802 +#ifdef CONFIG_GRKERNSEC
55803 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55804 +void gr_handle_vm86(void);
55805 +void gr_handle_mem_readwrite(u64 from, u64 to);
55806 +
55807 +extern int grsec_enable_dmesg;
55808 +extern int grsec_disable_privio;
55809 +#endif
55810 +
55811 +#endif
55812 diff -urNp linux-2.6.32.41/include/linux/hdpu_features.h linux-2.6.32.41/include/linux/hdpu_features.h
55813 --- linux-2.6.32.41/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55814 +++ linux-2.6.32.41/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55815 @@ -3,7 +3,7 @@
55816 struct cpustate_t {
55817 spinlock_t lock;
55818 int excl;
55819 - int open_count;
55820 + atomic_t open_count;
55821 unsigned char cached_val;
55822 int inited;
55823 unsigned long *set_addr;
55824 diff -urNp linux-2.6.32.41/include/linux/highmem.h linux-2.6.32.41/include/linux/highmem.h
55825 --- linux-2.6.32.41/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55826 +++ linux-2.6.32.41/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55827 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55828 kunmap_atomic(kaddr, KM_USER0);
55829 }
55830
55831 +static inline void sanitize_highpage(struct page *page)
55832 +{
55833 + void *kaddr;
55834 + unsigned long flags;
55835 +
55836 + local_irq_save(flags);
55837 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55838 + clear_page(kaddr);
55839 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55840 + local_irq_restore(flags);
55841 +}
55842 +
55843 static inline void zero_user_segments(struct page *page,
55844 unsigned start1, unsigned end1,
55845 unsigned start2, unsigned end2)
55846 diff -urNp linux-2.6.32.41/include/linux/i2o.h linux-2.6.32.41/include/linux/i2o.h
55847 --- linux-2.6.32.41/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55848 +++ linux-2.6.32.41/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55849 @@ -564,7 +564,7 @@ struct i2o_controller {
55850 struct i2o_device *exec; /* Executive */
55851 #if BITS_PER_LONG == 64
55852 spinlock_t context_list_lock; /* lock for context_list */
55853 - atomic_t context_list_counter; /* needed for unique contexts */
55854 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55855 struct list_head context_list; /* list of context id's
55856 and pointers */
55857 #endif
55858 diff -urNp linux-2.6.32.41/include/linux/init_task.h linux-2.6.32.41/include/linux/init_task.h
55859 --- linux-2.6.32.41/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55860 +++ linux-2.6.32.41/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55861 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
55862 #define INIT_IDS
55863 #endif
55864
55865 +#ifdef CONFIG_X86
55866 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55867 +#else
55868 +#define INIT_TASK_THREAD_INFO
55869 +#endif
55870 +
55871 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55872 /*
55873 * Because of the reduced scope of CAP_SETPCAP when filesystem
55874 @@ -156,6 +162,7 @@ extern struct cred init_cred;
55875 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55876 .comm = "swapper", \
55877 .thread = INIT_THREAD, \
55878 + INIT_TASK_THREAD_INFO \
55879 .fs = &init_fs, \
55880 .files = &init_files, \
55881 .signal = &init_signals, \
55882 diff -urNp linux-2.6.32.41/include/linux/interrupt.h linux-2.6.32.41/include/linux/interrupt.h
55883 --- linux-2.6.32.41/include/linux/interrupt.h 2011-03-27 14:31:47.000000000 -0400
55884 +++ linux-2.6.32.41/include/linux/interrupt.h 2011-04-17 15:56:46.000000000 -0400
55885 @@ -362,7 +362,7 @@ enum
55886 /* map softirq index to softirq name. update 'softirq_to_name' in
55887 * kernel/softirq.c when adding a new softirq.
55888 */
55889 -extern char *softirq_to_name[NR_SOFTIRQS];
55890 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55891
55892 /* softirq mask and active fields moved to irq_cpustat_t in
55893 * asm/hardirq.h to get better cache usage. KAO
55894 @@ -370,12 +370,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55895
55896 struct softirq_action
55897 {
55898 - void (*action)(struct softirq_action *);
55899 + void (*action)(void);
55900 };
55901
55902 asmlinkage void do_softirq(void);
55903 asmlinkage void __do_softirq(void);
55904 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55905 +extern void open_softirq(int nr, void (*action)(void));
55906 extern void softirq_init(void);
55907 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
55908 extern void raise_softirq_irqoff(unsigned int nr);
55909 diff -urNp linux-2.6.32.41/include/linux/irq.h linux-2.6.32.41/include/linux/irq.h
55910 --- linux-2.6.32.41/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
55911 +++ linux-2.6.32.41/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
55912 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
55913 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
55914 bool boot)
55915 {
55916 +#ifdef CONFIG_CPUMASK_OFFSTACK
55917 gfp_t gfp = GFP_ATOMIC;
55918
55919 if (boot)
55920 gfp = GFP_NOWAIT;
55921
55922 -#ifdef CONFIG_CPUMASK_OFFSTACK
55923 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
55924 return false;
55925
55926 diff -urNp linux-2.6.32.41/include/linux/kallsyms.h linux-2.6.32.41/include/linux/kallsyms.h
55927 --- linux-2.6.32.41/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
55928 +++ linux-2.6.32.41/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
55929 @@ -15,7 +15,8 @@
55930
55931 struct module;
55932
55933 -#ifdef CONFIG_KALLSYMS
55934 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55935 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55936 /* Lookup the address for a symbol. Returns 0 if not found. */
55937 unsigned long kallsyms_lookup_name(const char *name);
55938
55939 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
55940 /* Stupid that this does nothing, but I didn't create this mess. */
55941 #define __print_symbol(fmt, addr)
55942 #endif /*CONFIG_KALLSYMS*/
55943 +#else /* when included by kallsyms.c, vsnprintf.c, or
55944 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55945 +extern void __print_symbol(const char *fmt, unsigned long address);
55946 +extern int sprint_symbol(char *buffer, unsigned long address);
55947 +const char *kallsyms_lookup(unsigned long addr,
55948 + unsigned long *symbolsize,
55949 + unsigned long *offset,
55950 + char **modname, char *namebuf);
55951 +#endif
55952
55953 /* This macro allows us to keep printk typechecking */
55954 static void __check_printsym_format(const char *fmt, ...)
55955 diff -urNp linux-2.6.32.41/include/linux/kgdb.h linux-2.6.32.41/include/linux/kgdb.h
55956 --- linux-2.6.32.41/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
55957 +++ linux-2.6.32.41/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
55958 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
55959
55960 extern int kgdb_connected;
55961
55962 -extern atomic_t kgdb_setting_breakpoint;
55963 -extern atomic_t kgdb_cpu_doing_single_step;
55964 +extern atomic_unchecked_t kgdb_setting_breakpoint;
55965 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55966
55967 extern struct task_struct *kgdb_usethread;
55968 extern struct task_struct *kgdb_contthread;
55969 @@ -251,20 +251,20 @@ struct kgdb_arch {
55970 */
55971 struct kgdb_io {
55972 const char *name;
55973 - int (*read_char) (void);
55974 - void (*write_char) (u8);
55975 - void (*flush) (void);
55976 - int (*init) (void);
55977 - void (*pre_exception) (void);
55978 - void (*post_exception) (void);
55979 + int (* const read_char) (void);
55980 + void (* const write_char) (u8);
55981 + void (* const flush) (void);
55982 + int (* const init) (void);
55983 + void (* const pre_exception) (void);
55984 + void (* const post_exception) (void);
55985 };
55986
55987 -extern struct kgdb_arch arch_kgdb_ops;
55988 +extern const struct kgdb_arch arch_kgdb_ops;
55989
55990 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
55991
55992 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
55993 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
55994 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
55995 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
55996
55997 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
55998 extern int kgdb_mem2hex(char *mem, char *buf, int count);
55999 diff -urNp linux-2.6.32.41/include/linux/kmod.h linux-2.6.32.41/include/linux/kmod.h
56000 --- linux-2.6.32.41/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56001 +++ linux-2.6.32.41/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56002 @@ -31,6 +31,8 @@
56003 * usually useless though. */
56004 extern int __request_module(bool wait, const char *name, ...) \
56005 __attribute__((format(printf, 2, 3)));
56006 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56007 + __attribute__((format(printf, 3, 4)));
56008 #define request_module(mod...) __request_module(true, mod)
56009 #define request_module_nowait(mod...) __request_module(false, mod)
56010 #define try_then_request_module(x, mod...) \
56011 diff -urNp linux-2.6.32.41/include/linux/kobject.h linux-2.6.32.41/include/linux/kobject.h
56012 --- linux-2.6.32.41/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56013 +++ linux-2.6.32.41/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56014 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56015
56016 struct kobj_type {
56017 void (*release)(struct kobject *kobj);
56018 - struct sysfs_ops *sysfs_ops;
56019 + const struct sysfs_ops *sysfs_ops;
56020 struct attribute **default_attrs;
56021 };
56022
56023 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56024 };
56025
56026 struct kset_uevent_ops {
56027 - int (*filter)(struct kset *kset, struct kobject *kobj);
56028 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56029 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56030 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56031 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56032 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56033 struct kobj_uevent_env *env);
56034 };
56035
56036 @@ -132,7 +132,7 @@ struct kobj_attribute {
56037 const char *buf, size_t count);
56038 };
56039
56040 -extern struct sysfs_ops kobj_sysfs_ops;
56041 +extern const struct sysfs_ops kobj_sysfs_ops;
56042
56043 /**
56044 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56045 @@ -155,14 +155,14 @@ struct kset {
56046 struct list_head list;
56047 spinlock_t list_lock;
56048 struct kobject kobj;
56049 - struct kset_uevent_ops *uevent_ops;
56050 + const struct kset_uevent_ops *uevent_ops;
56051 };
56052
56053 extern void kset_init(struct kset *kset);
56054 extern int __must_check kset_register(struct kset *kset);
56055 extern void kset_unregister(struct kset *kset);
56056 extern struct kset * __must_check kset_create_and_add(const char *name,
56057 - struct kset_uevent_ops *u,
56058 + const struct kset_uevent_ops *u,
56059 struct kobject *parent_kobj);
56060
56061 static inline struct kset *to_kset(struct kobject *kobj)
56062 diff -urNp linux-2.6.32.41/include/linux/kvm_host.h linux-2.6.32.41/include/linux/kvm_host.h
56063 --- linux-2.6.32.41/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56064 +++ linux-2.6.32.41/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56065 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56066 void vcpu_load(struct kvm_vcpu *vcpu);
56067 void vcpu_put(struct kvm_vcpu *vcpu);
56068
56069 -int kvm_init(void *opaque, unsigned int vcpu_size,
56070 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56071 struct module *module);
56072 void kvm_exit(void);
56073
56074 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56075 struct kvm_guest_debug *dbg);
56076 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56077
56078 -int kvm_arch_init(void *opaque);
56079 +int kvm_arch_init(const void *opaque);
56080 void kvm_arch_exit(void);
56081
56082 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56083 diff -urNp linux-2.6.32.41/include/linux/libata.h linux-2.6.32.41/include/linux/libata.h
56084 --- linux-2.6.32.41/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56085 +++ linux-2.6.32.41/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56086 @@ -525,11 +525,11 @@ struct ata_ioports {
56087
56088 struct ata_host {
56089 spinlock_t lock;
56090 - struct device *dev;
56091 + struct device *dev;
56092 void __iomem * const *iomap;
56093 unsigned int n_ports;
56094 void *private_data;
56095 - struct ata_port_operations *ops;
56096 + const struct ata_port_operations *ops;
56097 unsigned long flags;
56098 #ifdef CONFIG_ATA_ACPI
56099 acpi_handle acpi_handle;
56100 @@ -710,7 +710,7 @@ struct ata_link {
56101
56102 struct ata_port {
56103 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56104 - struct ata_port_operations *ops;
56105 + const struct ata_port_operations *ops;
56106 spinlock_t *lock;
56107 /* Flags owned by the EH context. Only EH should touch these once the
56108 port is active */
56109 @@ -892,7 +892,7 @@ struct ata_port_info {
56110 unsigned long pio_mask;
56111 unsigned long mwdma_mask;
56112 unsigned long udma_mask;
56113 - struct ata_port_operations *port_ops;
56114 + const struct ata_port_operations *port_ops;
56115 void *private_data;
56116 };
56117
56118 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56119 extern const unsigned long sata_deb_timing_hotplug[];
56120 extern const unsigned long sata_deb_timing_long[];
56121
56122 -extern struct ata_port_operations ata_dummy_port_ops;
56123 +extern const struct ata_port_operations ata_dummy_port_ops;
56124 extern const struct ata_port_info ata_dummy_port_info;
56125
56126 static inline const unsigned long *
56127 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56128 struct scsi_host_template *sht);
56129 extern void ata_host_detach(struct ata_host *host);
56130 extern void ata_host_init(struct ata_host *, struct device *,
56131 - unsigned long, struct ata_port_operations *);
56132 + unsigned long, const struct ata_port_operations *);
56133 extern int ata_scsi_detect(struct scsi_host_template *sht);
56134 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56135 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56136 diff -urNp linux-2.6.32.41/include/linux/lockd/bind.h linux-2.6.32.41/include/linux/lockd/bind.h
56137 --- linux-2.6.32.41/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56138 +++ linux-2.6.32.41/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56139 @@ -23,13 +23,13 @@ struct svc_rqst;
56140 * This is the set of functions for lockd->nfsd communication
56141 */
56142 struct nlmsvc_binding {
56143 - __be32 (*fopen)(struct svc_rqst *,
56144 + __be32 (* const fopen)(struct svc_rqst *,
56145 struct nfs_fh *,
56146 struct file **);
56147 - void (*fclose)(struct file *);
56148 + void (* const fclose)(struct file *);
56149 };
56150
56151 -extern struct nlmsvc_binding * nlmsvc_ops;
56152 +extern const struct nlmsvc_binding * nlmsvc_ops;
56153
56154 /*
56155 * Similar to nfs_client_initdata, but without the NFS-specific
56156 diff -urNp linux-2.6.32.41/include/linux/mm.h linux-2.6.32.41/include/linux/mm.h
56157 --- linux-2.6.32.41/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56158 +++ linux-2.6.32.41/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56159 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56160
56161 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56162 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56163 +
56164 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56165 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56166 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56167 +#else
56168 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56169 +#endif
56170 +
56171 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56172 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56173
56174 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56175 int set_page_dirty_lock(struct page *page);
56176 int clear_page_dirty_for_io(struct page *page);
56177
56178 -/* Is the vma a continuation of the stack vma above it? */
56179 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56180 -{
56181 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56182 -}
56183 -
56184 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56185 unsigned long old_addr, struct vm_area_struct *new_vma,
56186 unsigned long new_addr, unsigned long len);
56187 @@ -890,6 +891,8 @@ struct shrinker {
56188 extern void register_shrinker(struct shrinker *);
56189 extern void unregister_shrinker(struct shrinker *);
56190
56191 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56192 +
56193 int vma_wants_writenotify(struct vm_area_struct *vma);
56194
56195 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56196 @@ -1162,6 +1165,7 @@ out:
56197 }
56198
56199 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56200 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56201
56202 extern unsigned long do_brk(unsigned long, unsigned long);
56203
56204 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56205 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56206 struct vm_area_struct **pprev);
56207
56208 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56209 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56210 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56211 +
56212 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56213 NULL if none. Assume start_addr < end_addr. */
56214 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56215 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56216 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56217 }
56218
56219 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56220 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56221 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56222 unsigned long pfn, unsigned long size, pgprot_t);
56223 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56224 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56225 extern int sysctl_memory_failure_early_kill;
56226 extern int sysctl_memory_failure_recovery;
56227 -extern atomic_long_t mce_bad_pages;
56228 +extern atomic_long_unchecked_t mce_bad_pages;
56229 +
56230 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56231 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56232 +#else
56233 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56234 +#endif
56235
56236 #endif /* __KERNEL__ */
56237 #endif /* _LINUX_MM_H */
56238 diff -urNp linux-2.6.32.41/include/linux/mm_types.h linux-2.6.32.41/include/linux/mm_types.h
56239 --- linux-2.6.32.41/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56240 +++ linux-2.6.32.41/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56241 @@ -186,6 +186,8 @@ struct vm_area_struct {
56242 #ifdef CONFIG_NUMA
56243 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56244 #endif
56245 +
56246 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56247 };
56248
56249 struct core_thread {
56250 @@ -287,6 +289,24 @@ struct mm_struct {
56251 #ifdef CONFIG_MMU_NOTIFIER
56252 struct mmu_notifier_mm *mmu_notifier_mm;
56253 #endif
56254 +
56255 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56256 + unsigned long pax_flags;
56257 +#endif
56258 +
56259 +#ifdef CONFIG_PAX_DLRESOLVE
56260 + unsigned long call_dl_resolve;
56261 +#endif
56262 +
56263 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56264 + unsigned long call_syscall;
56265 +#endif
56266 +
56267 +#ifdef CONFIG_PAX_ASLR
56268 + unsigned long delta_mmap; /* randomized offset */
56269 + unsigned long delta_stack; /* randomized offset */
56270 +#endif
56271 +
56272 };
56273
56274 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56275 diff -urNp linux-2.6.32.41/include/linux/mmu_notifier.h linux-2.6.32.41/include/linux/mmu_notifier.h
56276 --- linux-2.6.32.41/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56277 +++ linux-2.6.32.41/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56278 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56279 */
56280 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56281 ({ \
56282 - pte_t __pte; \
56283 + pte_t ___pte; \
56284 struct vm_area_struct *___vma = __vma; \
56285 unsigned long ___address = __address; \
56286 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56287 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56288 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56289 - __pte; \
56290 + ___pte; \
56291 })
56292
56293 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56294 diff -urNp linux-2.6.32.41/include/linux/mmzone.h linux-2.6.32.41/include/linux/mmzone.h
56295 --- linux-2.6.32.41/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56296 +++ linux-2.6.32.41/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56297 @@ -350,7 +350,7 @@ struct zone {
56298 unsigned long flags; /* zone flags, see below */
56299
56300 /* Zone statistics */
56301 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56302 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56303
56304 /*
56305 * prev_priority holds the scanning priority for this zone. It is
56306 diff -urNp linux-2.6.32.41/include/linux/mod_devicetable.h linux-2.6.32.41/include/linux/mod_devicetable.h
56307 --- linux-2.6.32.41/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56308 +++ linux-2.6.32.41/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56309 @@ -12,7 +12,7 @@
56310 typedef unsigned long kernel_ulong_t;
56311 #endif
56312
56313 -#define PCI_ANY_ID (~0)
56314 +#define PCI_ANY_ID ((__u16)~0)
56315
56316 struct pci_device_id {
56317 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56318 @@ -131,7 +131,7 @@ struct usb_device_id {
56319 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56320 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56321
56322 -#define HID_ANY_ID (~0)
56323 +#define HID_ANY_ID (~0U)
56324
56325 struct hid_device_id {
56326 __u16 bus;
56327 diff -urNp linux-2.6.32.41/include/linux/module.h linux-2.6.32.41/include/linux/module.h
56328 --- linux-2.6.32.41/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56329 +++ linux-2.6.32.41/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56330 @@ -287,16 +287,16 @@ struct module
56331 int (*init)(void);
56332
56333 /* If this is non-NULL, vfree after init() returns */
56334 - void *module_init;
56335 + void *module_init_rx, *module_init_rw;
56336
56337 /* Here is the actual code + data, vfree'd on unload. */
56338 - void *module_core;
56339 + void *module_core_rx, *module_core_rw;
56340
56341 /* Here are the sizes of the init and core sections */
56342 - unsigned int init_size, core_size;
56343 + unsigned int init_size_rw, core_size_rw;
56344
56345 /* The size of the executable code in each section. */
56346 - unsigned int init_text_size, core_text_size;
56347 + unsigned int init_size_rx, core_size_rx;
56348
56349 /* Arch-specific module values */
56350 struct mod_arch_specific arch;
56351 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56352 bool is_module_address(unsigned long addr);
56353 bool is_module_text_address(unsigned long addr);
56354
56355 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56356 +{
56357 +
56358 +#ifdef CONFIG_PAX_KERNEXEC
56359 + if (ktla_ktva(addr) >= (unsigned long)start &&
56360 + ktla_ktva(addr) < (unsigned long)start + size)
56361 + return 1;
56362 +#endif
56363 +
56364 + return ((void *)addr >= start && (void *)addr < start + size);
56365 +}
56366 +
56367 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56368 +{
56369 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56370 +}
56371 +
56372 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56373 +{
56374 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56375 +}
56376 +
56377 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56378 +{
56379 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56380 +}
56381 +
56382 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56383 +{
56384 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56385 +}
56386 +
56387 static inline int within_module_core(unsigned long addr, struct module *mod)
56388 {
56389 - return (unsigned long)mod->module_core <= addr &&
56390 - addr < (unsigned long)mod->module_core + mod->core_size;
56391 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56392 }
56393
56394 static inline int within_module_init(unsigned long addr, struct module *mod)
56395 {
56396 - return (unsigned long)mod->module_init <= addr &&
56397 - addr < (unsigned long)mod->module_init + mod->init_size;
56398 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56399 }
56400
56401 /* Search for module by name: must hold module_mutex. */
56402 diff -urNp linux-2.6.32.41/include/linux/moduleloader.h linux-2.6.32.41/include/linux/moduleloader.h
56403 --- linux-2.6.32.41/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56404 +++ linux-2.6.32.41/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56405 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56406 sections. Returns NULL on failure. */
56407 void *module_alloc(unsigned long size);
56408
56409 +#ifdef CONFIG_PAX_KERNEXEC
56410 +void *module_alloc_exec(unsigned long size);
56411 +#else
56412 +#define module_alloc_exec(x) module_alloc(x)
56413 +#endif
56414 +
56415 /* Free memory returned from module_alloc. */
56416 void module_free(struct module *mod, void *module_region);
56417
56418 +#ifdef CONFIG_PAX_KERNEXEC
56419 +void module_free_exec(struct module *mod, void *module_region);
56420 +#else
56421 +#define module_free_exec(x, y) module_free((x), (y))
56422 +#endif
56423 +
56424 /* Apply the given relocation to the (simplified) ELF. Return -error
56425 or 0. */
56426 int apply_relocate(Elf_Shdr *sechdrs,
56427 diff -urNp linux-2.6.32.41/include/linux/moduleparam.h linux-2.6.32.41/include/linux/moduleparam.h
56428 --- linux-2.6.32.41/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56429 +++ linux-2.6.32.41/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56430 @@ -132,7 +132,7 @@ struct kparam_array
56431
56432 /* Actually copy string: maxlen param is usually sizeof(string). */
56433 #define module_param_string(name, string, len, perm) \
56434 - static const struct kparam_string __param_string_##name \
56435 + static const struct kparam_string __param_string_##name __used \
56436 = { len, string }; \
56437 __module_param_call(MODULE_PARAM_PREFIX, name, \
56438 param_set_copystring, param_get_string, \
56439 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56440
56441 /* Comma-separated array: *nump is set to number they actually specified. */
56442 #define module_param_array_named(name, array, type, nump, perm) \
56443 - static const struct kparam_array __param_arr_##name \
56444 + static const struct kparam_array __param_arr_##name __used \
56445 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56446 sizeof(array[0]), array }; \
56447 __module_param_call(MODULE_PARAM_PREFIX, name, \
56448 diff -urNp linux-2.6.32.41/include/linux/mutex.h linux-2.6.32.41/include/linux/mutex.h
56449 --- linux-2.6.32.41/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56450 +++ linux-2.6.32.41/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56451 @@ -51,7 +51,7 @@ struct mutex {
56452 spinlock_t wait_lock;
56453 struct list_head wait_list;
56454 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56455 - struct thread_info *owner;
56456 + struct task_struct *owner;
56457 #endif
56458 #ifdef CONFIG_DEBUG_MUTEXES
56459 const char *name;
56460 diff -urNp linux-2.6.32.41/include/linux/namei.h linux-2.6.32.41/include/linux/namei.h
56461 --- linux-2.6.32.41/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56462 +++ linux-2.6.32.41/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56463 @@ -22,7 +22,7 @@ struct nameidata {
56464 unsigned int flags;
56465 int last_type;
56466 unsigned depth;
56467 - char *saved_names[MAX_NESTED_LINKS + 1];
56468 + const char *saved_names[MAX_NESTED_LINKS + 1];
56469
56470 /* Intent data */
56471 union {
56472 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56473 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56474 extern void unlock_rename(struct dentry *, struct dentry *);
56475
56476 -static inline void nd_set_link(struct nameidata *nd, char *path)
56477 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56478 {
56479 nd->saved_names[nd->depth] = path;
56480 }
56481
56482 -static inline char *nd_get_link(struct nameidata *nd)
56483 +static inline const char *nd_get_link(const struct nameidata *nd)
56484 {
56485 return nd->saved_names[nd->depth];
56486 }
56487 diff -urNp linux-2.6.32.41/include/linux/netfilter/xt_gradm.h linux-2.6.32.41/include/linux/netfilter/xt_gradm.h
56488 --- linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56489 +++ linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56490 @@ -0,0 +1,9 @@
56491 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56492 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56493 +
56494 +struct xt_gradm_mtinfo {
56495 + __u16 flags;
56496 + __u16 invflags;
56497 +};
56498 +
56499 +#endif
56500 diff -urNp linux-2.6.32.41/include/linux/nodemask.h linux-2.6.32.41/include/linux/nodemask.h
56501 --- linux-2.6.32.41/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56502 +++ linux-2.6.32.41/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56503 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56504
56505 #define any_online_node(mask) \
56506 ({ \
56507 - int node; \
56508 - for_each_node_mask(node, (mask)) \
56509 - if (node_online(node)) \
56510 + int __node; \
56511 + for_each_node_mask(__node, (mask)) \
56512 + if (node_online(__node)) \
56513 break; \
56514 - node; \
56515 + __node; \
56516 })
56517
56518 #define num_online_nodes() num_node_state(N_ONLINE)
56519 diff -urNp linux-2.6.32.41/include/linux/oprofile.h linux-2.6.32.41/include/linux/oprofile.h
56520 --- linux-2.6.32.41/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56521 +++ linux-2.6.32.41/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56522 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56523 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56524 char const * name, ulong * val);
56525
56526 -/** Create a file for read-only access to an atomic_t. */
56527 +/** Create a file for read-only access to an atomic_unchecked_t. */
56528 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56529 - char const * name, atomic_t * val);
56530 + char const * name, atomic_unchecked_t * val);
56531
56532 /** create a directory */
56533 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56534 diff -urNp linux-2.6.32.41/include/linux/perf_event.h linux-2.6.32.41/include/linux/perf_event.h
56535 --- linux-2.6.32.41/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56536 +++ linux-2.6.32.41/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56537 @@ -476,7 +476,7 @@ struct hw_perf_event {
56538 struct hrtimer hrtimer;
56539 };
56540 };
56541 - atomic64_t prev_count;
56542 + atomic64_unchecked_t prev_count;
56543 u64 sample_period;
56544 u64 last_period;
56545 atomic64_t period_left;
56546 @@ -557,7 +557,7 @@ struct perf_event {
56547 const struct pmu *pmu;
56548
56549 enum perf_event_active_state state;
56550 - atomic64_t count;
56551 + atomic64_unchecked_t count;
56552
56553 /*
56554 * These are the total time in nanoseconds that the event
56555 @@ -595,8 +595,8 @@ struct perf_event {
56556 * These accumulate total time (in nanoseconds) that children
56557 * events have been enabled and running, respectively.
56558 */
56559 - atomic64_t child_total_time_enabled;
56560 - atomic64_t child_total_time_running;
56561 + atomic64_unchecked_t child_total_time_enabled;
56562 + atomic64_unchecked_t child_total_time_running;
56563
56564 /*
56565 * Protect attach/detach and child_list:
56566 diff -urNp linux-2.6.32.41/include/linux/pipe_fs_i.h linux-2.6.32.41/include/linux/pipe_fs_i.h
56567 --- linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56568 +++ linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56569 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56570 wait_queue_head_t wait;
56571 unsigned int nrbufs, curbuf;
56572 struct page *tmp_page;
56573 - unsigned int readers;
56574 - unsigned int writers;
56575 - unsigned int waiting_writers;
56576 + atomic_t readers;
56577 + atomic_t writers;
56578 + atomic_t waiting_writers;
56579 unsigned int r_counter;
56580 unsigned int w_counter;
56581 struct fasync_struct *fasync_readers;
56582 diff -urNp linux-2.6.32.41/include/linux/poison.h linux-2.6.32.41/include/linux/poison.h
56583 --- linux-2.6.32.41/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56584 +++ linux-2.6.32.41/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56585 @@ -19,8 +19,8 @@
56586 * under normal circumstances, used to verify that nobody uses
56587 * non-initialized list entries.
56588 */
56589 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56590 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56591 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56592 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56593
56594 /********** include/linux/timer.h **********/
56595 /*
56596 diff -urNp linux-2.6.32.41/include/linux/proc_fs.h linux-2.6.32.41/include/linux/proc_fs.h
56597 --- linux-2.6.32.41/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56598 +++ linux-2.6.32.41/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56599 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56600 return proc_create_data(name, mode, parent, proc_fops, NULL);
56601 }
56602
56603 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56604 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56605 +{
56606 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56607 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56608 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56609 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56610 +#else
56611 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56612 +#endif
56613 +}
56614 +
56615 +
56616 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56617 mode_t mode, struct proc_dir_entry *base,
56618 read_proc_t *read_proc, void * data)
56619 diff -urNp linux-2.6.32.41/include/linux/ptrace.h linux-2.6.32.41/include/linux/ptrace.h
56620 --- linux-2.6.32.41/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56621 +++ linux-2.6.32.41/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56622 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56623 extern void exit_ptrace(struct task_struct *tracer);
56624 #define PTRACE_MODE_READ 1
56625 #define PTRACE_MODE_ATTACH 2
56626 -/* Returns 0 on success, -errno on denial. */
56627 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56628 /* Returns true on success, false on denial. */
56629 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56630 +/* Returns true on success, false on denial. */
56631 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56632
56633 static inline int ptrace_reparented(struct task_struct *child)
56634 {
56635 diff -urNp linux-2.6.32.41/include/linux/random.h linux-2.6.32.41/include/linux/random.h
56636 --- linux-2.6.32.41/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56637 +++ linux-2.6.32.41/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56638 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56639 u32 random32(void);
56640 void srandom32(u32 seed);
56641
56642 +static inline unsigned long pax_get_random_long(void)
56643 +{
56644 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56645 +}
56646 +
56647 #endif /* __KERNEL___ */
56648
56649 #endif /* _LINUX_RANDOM_H */
56650 diff -urNp linux-2.6.32.41/include/linux/reboot.h linux-2.6.32.41/include/linux/reboot.h
56651 --- linux-2.6.32.41/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56652 +++ linux-2.6.32.41/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56653 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56654 * Architecture-specific implementations of sys_reboot commands.
56655 */
56656
56657 -extern void machine_restart(char *cmd);
56658 -extern void machine_halt(void);
56659 -extern void machine_power_off(void);
56660 +extern void machine_restart(char *cmd) __noreturn;
56661 +extern void machine_halt(void) __noreturn;
56662 +extern void machine_power_off(void) __noreturn;
56663
56664 extern void machine_shutdown(void);
56665 struct pt_regs;
56666 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56667 */
56668
56669 extern void kernel_restart_prepare(char *cmd);
56670 -extern void kernel_restart(char *cmd);
56671 -extern void kernel_halt(void);
56672 -extern void kernel_power_off(void);
56673 +extern void kernel_restart(char *cmd) __noreturn;
56674 +extern void kernel_halt(void) __noreturn;
56675 +extern void kernel_power_off(void) __noreturn;
56676
56677 void ctrl_alt_del(void);
56678
56679 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56680 * Emergency restart, callable from an interrupt handler.
56681 */
56682
56683 -extern void emergency_restart(void);
56684 +extern void emergency_restart(void) __noreturn;
56685 #include <asm/emergency-restart.h>
56686
56687 #endif
56688 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs.h linux-2.6.32.41/include/linux/reiserfs_fs.h
56689 --- linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56690 +++ linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56691 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56692 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56693
56694 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56695 -#define get_generation(s) atomic_read (&fs_generation(s))
56696 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56697 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56698 #define __fs_changed(gen,s) (gen != get_generation (s))
56699 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56700 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56701 */
56702
56703 struct item_operations {
56704 - int (*bytes_number) (struct item_head * ih, int block_size);
56705 - void (*decrement_key) (struct cpu_key *);
56706 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56707 + int (* const bytes_number) (struct item_head * ih, int block_size);
56708 + void (* const decrement_key) (struct cpu_key *);
56709 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56710 unsigned long bsize);
56711 - void (*print_item) (struct item_head *, char *item);
56712 - void (*check_item) (struct item_head *, char *item);
56713 + void (* const print_item) (struct item_head *, char *item);
56714 + void (* const check_item) (struct item_head *, char *item);
56715
56716 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56717 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56718 int is_affected, int insert_size);
56719 - int (*check_left) (struct virtual_item * vi, int free,
56720 + int (* const check_left) (struct virtual_item * vi, int free,
56721 int start_skip, int end_skip);
56722 - int (*check_right) (struct virtual_item * vi, int free);
56723 - int (*part_size) (struct virtual_item * vi, int from, int to);
56724 - int (*unit_num) (struct virtual_item * vi);
56725 - void (*print_vi) (struct virtual_item * vi);
56726 + int (* const check_right) (struct virtual_item * vi, int free);
56727 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56728 + int (* const unit_num) (struct virtual_item * vi);
56729 + void (* const print_vi) (struct virtual_item * vi);
56730 };
56731
56732 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56733 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56734
56735 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56736 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56737 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs_sb.h linux-2.6.32.41/include/linux/reiserfs_fs_sb.h
56738 --- linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56739 +++ linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56740 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56741 /* Comment? -Hans */
56742 wait_queue_head_t s_wait;
56743 /* To be obsoleted soon by per buffer seals.. -Hans */
56744 - atomic_t s_generation_counter; // increased by one every time the
56745 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56746 // tree gets re-balanced
56747 unsigned long s_properties; /* File system properties. Currently holds
56748 on-disk FS format */
56749 diff -urNp linux-2.6.32.41/include/linux/sched.h linux-2.6.32.41/include/linux/sched.h
56750 --- linux-2.6.32.41/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56751 +++ linux-2.6.32.41/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
56752 @@ -101,6 +101,7 @@ struct bio;
56753 struct fs_struct;
56754 struct bts_context;
56755 struct perf_event_context;
56756 +struct linux_binprm;
56757
56758 /*
56759 * List of flags we want to share for kernel threads,
56760 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56761 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56762 asmlinkage void __schedule(void);
56763 asmlinkage void schedule(void);
56764 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56765 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56766
56767 struct nsproxy;
56768 struct user_namespace;
56769 @@ -371,9 +372,12 @@ struct user_namespace;
56770 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56771
56772 extern int sysctl_max_map_count;
56773 +extern unsigned long sysctl_heap_stack_gap;
56774
56775 #include <linux/aio.h>
56776
56777 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56778 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56779 extern unsigned long
56780 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56781 unsigned long, unsigned long);
56782 @@ -666,6 +670,16 @@ struct signal_struct {
56783 struct tty_audit_buf *tty_audit_buf;
56784 #endif
56785
56786 +#ifdef CONFIG_GRKERNSEC
56787 + u32 curr_ip;
56788 + u32 saved_ip;
56789 + u32 gr_saddr;
56790 + u32 gr_daddr;
56791 + u16 gr_sport;
56792 + u16 gr_dport;
56793 + u8 used_accept:1;
56794 +#endif
56795 +
56796 int oom_adj; /* OOM kill score adjustment (bit shift) */
56797 };
56798
56799 @@ -723,6 +737,11 @@ struct user_struct {
56800 struct key *session_keyring; /* UID's default session keyring */
56801 #endif
56802
56803 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56804 + unsigned int banned;
56805 + unsigned long ban_expires;
56806 +#endif
56807 +
56808 /* Hash table maintenance information */
56809 struct hlist_node uidhash_node;
56810 uid_t uid;
56811 @@ -1328,8 +1347,8 @@ struct task_struct {
56812 struct list_head thread_group;
56813
56814 struct completion *vfork_done; /* for vfork() */
56815 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56816 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56817 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56818 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56819
56820 cputime_t utime, stime, utimescaled, stimescaled;
56821 cputime_t gtime;
56822 @@ -1343,16 +1362,6 @@ struct task_struct {
56823 struct task_cputime cputime_expires;
56824 struct list_head cpu_timers[3];
56825
56826 -/* process credentials */
56827 - const struct cred *real_cred; /* objective and real subjective task
56828 - * credentials (COW) */
56829 - const struct cred *cred; /* effective (overridable) subjective task
56830 - * credentials (COW) */
56831 - struct mutex cred_guard_mutex; /* guard against foreign influences on
56832 - * credential calculations
56833 - * (notably. ptrace) */
56834 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56835 -
56836 char comm[TASK_COMM_LEN]; /* executable name excluding path
56837 - access with [gs]et_task_comm (which lock
56838 it with task_lock())
56839 @@ -1369,6 +1378,10 @@ struct task_struct {
56840 #endif
56841 /* CPU-specific state of this task */
56842 struct thread_struct thread;
56843 +/* thread_info moved to task_struct */
56844 +#ifdef CONFIG_X86
56845 + struct thread_info tinfo;
56846 +#endif
56847 /* filesystem information */
56848 struct fs_struct *fs;
56849 /* open file information */
56850 @@ -1436,6 +1449,15 @@ struct task_struct {
56851 int hardirq_context;
56852 int softirq_context;
56853 #endif
56854 +
56855 +/* process credentials */
56856 + const struct cred *real_cred; /* objective and real subjective task
56857 + * credentials (COW) */
56858 + struct mutex cred_guard_mutex; /* guard against foreign influences on
56859 + * credential calculations
56860 + * (notably. ptrace) */
56861 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56862 +
56863 #ifdef CONFIG_LOCKDEP
56864 # define MAX_LOCK_DEPTH 48UL
56865 u64 curr_chain_key;
56866 @@ -1456,6 +1478,9 @@ struct task_struct {
56867
56868 struct backing_dev_info *backing_dev_info;
56869
56870 + const struct cred *cred; /* effective (overridable) subjective task
56871 + * credentials (COW) */
56872 +
56873 struct io_context *io_context;
56874
56875 unsigned long ptrace_message;
56876 @@ -1519,6 +1544,21 @@ struct task_struct {
56877 unsigned long default_timer_slack_ns;
56878
56879 struct list_head *scm_work_list;
56880 +
56881 +#ifdef CONFIG_GRKERNSEC
56882 + /* grsecurity */
56883 + struct dentry *gr_chroot_dentry;
56884 + struct acl_subject_label *acl;
56885 + struct acl_role_label *role;
56886 + struct file *exec_file;
56887 + u16 acl_role_id;
56888 + /* is this the task that authenticated to the special role */
56889 + u8 acl_sp_role;
56890 + u8 is_writable;
56891 + u8 brute;
56892 + u8 gr_is_chrooted;
56893 +#endif
56894 +
56895 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56896 /* Index of current stored adress in ret_stack */
56897 int curr_ret_stack;
56898 @@ -1542,6 +1582,57 @@ struct task_struct {
56899 #endif /* CONFIG_TRACING */
56900 };
56901
56902 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56903 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56904 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56905 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56906 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56907 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56908 +
56909 +#ifdef CONFIG_PAX_SOFTMODE
56910 +extern unsigned int pax_softmode;
56911 +#endif
56912 +
56913 +extern int pax_check_flags(unsigned long *);
56914 +
56915 +/* if tsk != current then task_lock must be held on it */
56916 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56917 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
56918 +{
56919 + if (likely(tsk->mm))
56920 + return tsk->mm->pax_flags;
56921 + else
56922 + return 0UL;
56923 +}
56924 +
56925 +/* if tsk != current then task_lock must be held on it */
56926 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56927 +{
56928 + if (likely(tsk->mm)) {
56929 + tsk->mm->pax_flags = flags;
56930 + return 0;
56931 + }
56932 + return -EINVAL;
56933 +}
56934 +#endif
56935 +
56936 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56937 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
56938 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56939 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56940 +#endif
56941 +
56942 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56943 +void pax_report_insns(void *pc, void *sp);
56944 +void pax_report_refcount_overflow(struct pt_regs *regs);
56945 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
56946 +
56947 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56948 +extern void pax_track_stack(void);
56949 +#else
56950 +static inline void pax_track_stack(void) {}
56951 +#endif
56952 +
56953 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56954 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
56955
56956 @@ -1978,7 +2069,9 @@ void yield(void);
56957 extern struct exec_domain default_exec_domain;
56958
56959 union thread_union {
56960 +#ifndef CONFIG_X86
56961 struct thread_info thread_info;
56962 +#endif
56963 unsigned long stack[THREAD_SIZE/sizeof(long)];
56964 };
56965
56966 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
56967 extern void exit_itimers(struct signal_struct *);
56968 extern void flush_itimer_signals(void);
56969
56970 -extern NORET_TYPE void do_group_exit(int);
56971 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56972
56973 extern void daemonize(const char *, ...);
56974 extern int allow_signal(int);
56975 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
56976
56977 #endif
56978
56979 -static inline int object_is_on_stack(void *obj)
56980 +static inline int object_starts_on_stack(void *obj)
56981 {
56982 - void *stack = task_stack_page(current);
56983 + const void *stack = task_stack_page(current);
56984
56985 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56986 }
56987
56988 +#ifdef CONFIG_PAX_USERCOPY
56989 +extern int object_is_on_stack(const void *obj, unsigned long len);
56990 +#endif
56991 +
56992 extern void thread_info_cache_init(void);
56993
56994 #ifdef CONFIG_DEBUG_STACK_USAGE
56995 diff -urNp linux-2.6.32.41/include/linux/screen_info.h linux-2.6.32.41/include/linux/screen_info.h
56996 --- linux-2.6.32.41/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
56997 +++ linux-2.6.32.41/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
56998 @@ -42,7 +42,8 @@ struct screen_info {
56999 __u16 pages; /* 0x32 */
57000 __u16 vesa_attributes; /* 0x34 */
57001 __u32 capabilities; /* 0x36 */
57002 - __u8 _reserved[6]; /* 0x3a */
57003 + __u16 vesapm_size; /* 0x3a */
57004 + __u8 _reserved[4]; /* 0x3c */
57005 } __attribute__((packed));
57006
57007 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57008 diff -urNp linux-2.6.32.41/include/linux/security.h linux-2.6.32.41/include/linux/security.h
57009 --- linux-2.6.32.41/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57010 +++ linux-2.6.32.41/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57011 @@ -34,6 +34,7 @@
57012 #include <linux/key.h>
57013 #include <linux/xfrm.h>
57014 #include <linux/gfp.h>
57015 +#include <linux/grsecurity.h>
57016 #include <net/flow.h>
57017
57018 /* Maximum number of letters for an LSM name string */
57019 diff -urNp linux-2.6.32.41/include/linux/shm.h linux-2.6.32.41/include/linux/shm.h
57020 --- linux-2.6.32.41/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57021 +++ linux-2.6.32.41/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57022 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57023 pid_t shm_cprid;
57024 pid_t shm_lprid;
57025 struct user_struct *mlock_user;
57026 +#ifdef CONFIG_GRKERNSEC
57027 + time_t shm_createtime;
57028 + pid_t shm_lapid;
57029 +#endif
57030 };
57031
57032 /* shm_mode upper byte flags */
57033 diff -urNp linux-2.6.32.41/include/linux/skbuff.h linux-2.6.32.41/include/linux/skbuff.h
57034 --- linux-2.6.32.41/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57035 +++ linux-2.6.32.41/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
57036 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57037 */
57038 static inline int skb_queue_empty(const struct sk_buff_head *list)
57039 {
57040 - return list->next == (struct sk_buff *)list;
57041 + return list->next == (const struct sk_buff *)list;
57042 }
57043
57044 /**
57045 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57046 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57047 const struct sk_buff *skb)
57048 {
57049 - return (skb->next == (struct sk_buff *) list);
57050 + return (skb->next == (const struct sk_buff *) list);
57051 }
57052
57053 /**
57054 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57055 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57056 const struct sk_buff *skb)
57057 {
57058 - return (skb->prev == (struct sk_buff *) list);
57059 + return (skb->prev == (const struct sk_buff *) list);
57060 }
57061
57062 /**
57063 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57064 * headroom, you should not reduce this.
57065 */
57066 #ifndef NET_SKB_PAD
57067 -#define NET_SKB_PAD 32
57068 +#define NET_SKB_PAD (_AC(32,U))
57069 #endif
57070
57071 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57072 diff -urNp linux-2.6.32.41/include/linux/slab_def.h linux-2.6.32.41/include/linux/slab_def.h
57073 --- linux-2.6.32.41/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57074 +++ linux-2.6.32.41/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57075 @@ -69,10 +69,10 @@ struct kmem_cache {
57076 unsigned long node_allocs;
57077 unsigned long node_frees;
57078 unsigned long node_overflow;
57079 - atomic_t allochit;
57080 - atomic_t allocmiss;
57081 - atomic_t freehit;
57082 - atomic_t freemiss;
57083 + atomic_unchecked_t allochit;
57084 + atomic_unchecked_t allocmiss;
57085 + atomic_unchecked_t freehit;
57086 + atomic_unchecked_t freemiss;
57087
57088 /*
57089 * If debugging is enabled, then the allocator can add additional
57090 diff -urNp linux-2.6.32.41/include/linux/slab.h linux-2.6.32.41/include/linux/slab.h
57091 --- linux-2.6.32.41/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57092 +++ linux-2.6.32.41/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57093 @@ -11,12 +11,20 @@
57094
57095 #include <linux/gfp.h>
57096 #include <linux/types.h>
57097 +#include <linux/err.h>
57098
57099 /*
57100 * Flags to pass to kmem_cache_create().
57101 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57102 */
57103 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57104 +
57105 +#ifdef CONFIG_PAX_USERCOPY
57106 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57107 +#else
57108 +#define SLAB_USERCOPY 0x00000000UL
57109 +#endif
57110 +
57111 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57112 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57113 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57114 @@ -82,10 +90,13 @@
57115 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57116 * Both make kfree a no-op.
57117 */
57118 -#define ZERO_SIZE_PTR ((void *)16)
57119 +#define ZERO_SIZE_PTR \
57120 +({ \
57121 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57122 + (void *)(-MAX_ERRNO-1L); \
57123 +})
57124
57125 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57126 - (unsigned long)ZERO_SIZE_PTR)
57127 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57128
57129 /*
57130 * struct kmem_cache related prototypes
57131 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57132 void kfree(const void *);
57133 void kzfree(const void *);
57134 size_t ksize(const void *);
57135 +void check_object_size(const void *ptr, unsigned long n, bool to);
57136
57137 /*
57138 * Allocator specific definitions. These are mainly used to establish optimized
57139 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57140
57141 void __init kmem_cache_init_late(void);
57142
57143 +#define kmalloc(x, y) \
57144 +({ \
57145 + void *___retval; \
57146 + intoverflow_t ___x = (intoverflow_t)x; \
57147 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57148 + ___retval = NULL; \
57149 + else \
57150 + ___retval = kmalloc((size_t)___x, (y)); \
57151 + ___retval; \
57152 +})
57153 +
57154 +#define kmalloc_node(x, y, z) \
57155 +({ \
57156 + void *___retval; \
57157 + intoverflow_t ___x = (intoverflow_t)x; \
57158 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57159 + ___retval = NULL; \
57160 + else \
57161 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57162 + ___retval; \
57163 +})
57164 +
57165 +#define kzalloc(x, y) \
57166 +({ \
57167 + void *___retval; \
57168 + intoverflow_t ___x = (intoverflow_t)x; \
57169 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57170 + ___retval = NULL; \
57171 + else \
57172 + ___retval = kzalloc((size_t)___x, (y)); \
57173 + ___retval; \
57174 +})
57175 +
57176 #endif /* _LINUX_SLAB_H */
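The slab.h hunk above wraps kmalloc(), kmalloc_node() and kzalloc() in GNU C statement-expression macros: the requested size is evaluated once in a wider type (intoverflow_t), anything above ULONG_MAX triggers a WARN and yields NULL, and only a sane value is forwarded to the real allocator; the vmalloc family receives the same treatment later in the patch. Below is a standalone userspace sketch of the pattern, with made-up names standing in for the kernel types; the check only has teeth where the wide type is wider than the allocator's size type, so the sketch pins its size type to 32 bits.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Illustrative stand-ins: alloc_size_t plays the role of size_t on a
 * 32-bit kernel, overflow_t the role of intoverflow_t, a wider type in
 * which the caller's size arithmetic cannot wrap. */
typedef uint32_t alloc_size_t;
typedef uint64_t overflow_t;

/* GNU C statement expression, same shape as the patched kmalloc():
 * evaluate the requested size in the wide type, refuse anything that
 * does not fit the allocator's size type, otherwise forward the call. */
#define checked_alloc(n)                                               \
({                                                                     \
        void *___p;                                                    \
        overflow_t ___n = (overflow_t)(n);                             \
        if (___n > (overflow_t)UINT32_MAX) {                           \
                fprintf(stderr, "checked_alloc: size overflow\n");     \
                ___p = NULL;                                           \
        } else {                                                       \
                ___p = malloc((alloc_size_t)___n);                     \
        }                                                              \
        ___p;                                                          \
})

int main(void)
{
        /* 0x10000 elements of 0x10000 bytes wraps a 32-bit size_t to 0;
         * computed in the wide type, the wrapper sees the real value. */
        overflow_t want = (overflow_t)0x10000 * 0x10000;
        void *p = checked_alloc(want);  /* rejected, returns NULL */
        void *q = checked_alloc(4096);  /* normal allocation */

        printf("p=%p q=%p\n", p, q);
        free(q);
        return 0;
}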
57177 diff -urNp linux-2.6.32.41/include/linux/slub_def.h linux-2.6.32.41/include/linux/slub_def.h
57178 --- linux-2.6.32.41/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57179 +++ linux-2.6.32.41/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57180 @@ -86,7 +86,7 @@ struct kmem_cache {
57181 struct kmem_cache_order_objects max;
57182 struct kmem_cache_order_objects min;
57183 gfp_t allocflags; /* gfp flags to use on each alloc */
57184 - int refcount; /* Refcount for slab cache destroy */
57185 + atomic_t refcount; /* Refcount for slab cache destroy */
57186 void (*ctor)(void *);
57187 int inuse; /* Offset to metadata */
57188 int align; /* Alignment */
57189 diff -urNp linux-2.6.32.41/include/linux/sonet.h linux-2.6.32.41/include/linux/sonet.h
57190 --- linux-2.6.32.41/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57191 +++ linux-2.6.32.41/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57192 @@ -61,7 +61,7 @@ struct sonet_stats {
57193 #include <asm/atomic.h>
57194
57195 struct k_sonet_stats {
57196 -#define __HANDLE_ITEM(i) atomic_t i
57197 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57198 __SONET_ITEMS
57199 #undef __HANDLE_ITEM
57200 };
57201 diff -urNp linux-2.6.32.41/include/linux/sunrpc/clnt.h linux-2.6.32.41/include/linux/sunrpc/clnt.h
57202 --- linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57203 +++ linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57204 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57205 {
57206 switch (sap->sa_family) {
57207 case AF_INET:
57208 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57209 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57210 case AF_INET6:
57211 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57212 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57213 }
57214 return 0;
57215 }
57216 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57217 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57218 const struct sockaddr *src)
57219 {
57220 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57221 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57222 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57223
57224 dsin->sin_family = ssin->sin_family;
57225 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57226 if (sa->sa_family != AF_INET6)
57227 return 0;
57228
57229 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57230 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57231 }
57232
57233 #endif /* __KERNEL__ */
57234 diff -urNp linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h
57235 --- linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57236 +++ linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57237 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57238 extern unsigned int svcrdma_max_requests;
57239 extern unsigned int svcrdma_max_req_size;
57240
57241 -extern atomic_t rdma_stat_recv;
57242 -extern atomic_t rdma_stat_read;
57243 -extern atomic_t rdma_stat_write;
57244 -extern atomic_t rdma_stat_sq_starve;
57245 -extern atomic_t rdma_stat_rq_starve;
57246 -extern atomic_t rdma_stat_rq_poll;
57247 -extern atomic_t rdma_stat_rq_prod;
57248 -extern atomic_t rdma_stat_sq_poll;
57249 -extern atomic_t rdma_stat_sq_prod;
57250 +extern atomic_unchecked_t rdma_stat_recv;
57251 +extern atomic_unchecked_t rdma_stat_read;
57252 +extern atomic_unchecked_t rdma_stat_write;
57253 +extern atomic_unchecked_t rdma_stat_sq_starve;
57254 +extern atomic_unchecked_t rdma_stat_rq_starve;
57255 +extern atomic_unchecked_t rdma_stat_rq_poll;
57256 +extern atomic_unchecked_t rdma_stat_rq_prod;
57257 +extern atomic_unchecked_t rdma_stat_sq_poll;
57258 +extern atomic_unchecked_t rdma_stat_sq_prod;
57259
57260 #define RPCRDMA_VERSION 1
57261
57262 diff -urNp linux-2.6.32.41/include/linux/suspend.h linux-2.6.32.41/include/linux/suspend.h
57263 --- linux-2.6.32.41/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57264 +++ linux-2.6.32.41/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57265 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57266 * which require special recovery actions in that situation.
57267 */
57268 struct platform_suspend_ops {
57269 - int (*valid)(suspend_state_t state);
57270 - int (*begin)(suspend_state_t state);
57271 - int (*prepare)(void);
57272 - int (*prepare_late)(void);
57273 - int (*enter)(suspend_state_t state);
57274 - void (*wake)(void);
57275 - void (*finish)(void);
57276 - void (*end)(void);
57277 - void (*recover)(void);
57278 + int (* const valid)(suspend_state_t state);
57279 + int (* const begin)(suspend_state_t state);
57280 + int (* const prepare)(void);
57281 + int (* const prepare_late)(void);
57282 + int (* const enter)(suspend_state_t state);
57283 + void (* const wake)(void);
57284 + void (* const finish)(void);
57285 + void (* const end)(void);
57286 + void (* const recover)(void);
57287 };
57288
57289 #ifdef CONFIG_SUSPEND
57290 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57291 * suspend_set_ops - set platform dependent suspend operations
57292 * @ops: The new suspend operations to set.
57293 */
57294 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57295 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57296 extern int suspend_valid_only_mem(suspend_state_t state);
57297
57298 /**
57299 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57300 #else /* !CONFIG_SUSPEND */
57301 #define suspend_valid_only_mem NULL
57302
57303 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57304 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57305 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57306 #endif /* !CONFIG_SUSPEND */
57307
57308 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57309 * platforms which require special recovery actions in that situation.
57310 */
57311 struct platform_hibernation_ops {
57312 - int (*begin)(void);
57313 - void (*end)(void);
57314 - int (*pre_snapshot)(void);
57315 - void (*finish)(void);
57316 - int (*prepare)(void);
57317 - int (*enter)(void);
57318 - void (*leave)(void);
57319 - int (*pre_restore)(void);
57320 - void (*restore_cleanup)(void);
57321 - void (*recover)(void);
57322 + int (* const begin)(void);
57323 + void (* const end)(void);
57324 + int (* const pre_snapshot)(void);
57325 + void (* const finish)(void);
57326 + int (* const prepare)(void);
57327 + int (* const enter)(void);
57328 + void (* const leave)(void);
57329 + int (* const pre_restore)(void);
57330 + void (* const restore_cleanup)(void);
57331 + void (* const recover)(void);
57332 };
57333
57334 #ifdef CONFIG_HIBERNATION
57335 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57336 extern void swsusp_unset_page_free(struct page *);
57337 extern unsigned long get_safe_page(gfp_t gfp_mask);
57338
57339 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57340 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57341 extern int hibernate(void);
57342 extern bool system_entering_hibernation(void);
57343 #else /* CONFIG_HIBERNATION */
57344 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57345 static inline void swsusp_set_page_free(struct page *p) {}
57346 static inline void swsusp_unset_page_free(struct page *p) {}
57347
57348 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57349 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57350 static inline int hibernate(void) { return -ENOSYS; }
57351 static inline bool system_entering_hibernation(void) { return false; }
57352 #endif /* CONFIG_HIBERNATION */
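The suspend.h hunk above, like the sysfs_ops and neigh_ops changes elsewhere in this patch, marks every function pointer in the ops structures "* const" and makes the registration helpers take "const struct ... *", so a fully initialized dispatch table can be immutable. A minimal userspace illustration of the idea follows; all identifiers in it are invented for the sketch.

#include <stdio.h>

/* With every member a "* const" function pointer and the instance itself
 * const, the whole dispatch table can live in a read-only section, so a
 * stray or attacker-controlled write cannot redirect these calls. */
struct demo_ops {
        int  (* const prepare)(void);
        void (* const finish)(void);
};

static int demo_prepare(void) { puts("prepare"); return 0; }
static void demo_finish(void) { puts("finish"); }

/* const instance: typically placed in .rodata rather than writable data */
static const struct demo_ops demo_suspend_ops = {
        .prepare = demo_prepare,
        .finish  = demo_finish,
};

int main(void)
{
        if (demo_suspend_ops.prepare() == 0)
                demo_suspend_ops.finish();
        /* demo_suspend_ops.prepare = NULL;  -- rejected: member is const */
        return 0;
}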
57353 diff -urNp linux-2.6.32.41/include/linux/sysctl.h linux-2.6.32.41/include/linux/sysctl.h
57354 --- linux-2.6.32.41/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57355 +++ linux-2.6.32.41/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57356 @@ -164,7 +164,11 @@ enum
57357 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57358 };
57359
57360 -
57361 +#ifdef CONFIG_PAX_SOFTMODE
57362 +enum {
57363 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57364 +};
57365 +#endif
57366
57367 /* CTL_VM names: */
57368 enum
57369 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57370
57371 extern int proc_dostring(struct ctl_table *, int,
57372 void __user *, size_t *, loff_t *);
57373 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57374 + void __user *, size_t *, loff_t *);
57375 extern int proc_dointvec(struct ctl_table *, int,
57376 void __user *, size_t *, loff_t *);
57377 extern int proc_dointvec_minmax(struct ctl_table *, int,
57378 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57379
57380 extern ctl_handler sysctl_data;
57381 extern ctl_handler sysctl_string;
57382 +extern ctl_handler sysctl_string_modpriv;
57383 extern ctl_handler sysctl_intvec;
57384 extern ctl_handler sysctl_jiffies;
57385 extern ctl_handler sysctl_ms_jiffies;
57386 diff -urNp linux-2.6.32.41/include/linux/sysfs.h linux-2.6.32.41/include/linux/sysfs.h
57387 --- linux-2.6.32.41/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57388 +++ linux-2.6.32.41/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57389 @@ -75,8 +75,8 @@ struct bin_attribute {
57390 };
57391
57392 struct sysfs_ops {
57393 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57394 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57395 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57396 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57397 };
57398
57399 struct sysfs_dirent;
57400 diff -urNp linux-2.6.32.41/include/linux/thread_info.h linux-2.6.32.41/include/linux/thread_info.h
57401 --- linux-2.6.32.41/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57402 +++ linux-2.6.32.41/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57403 @@ -23,7 +23,7 @@ struct restart_block {
57404 };
57405 /* For futex_wait and futex_wait_requeue_pi */
57406 struct {
57407 - u32 *uaddr;
57408 + u32 __user *uaddr;
57409 u32 val;
57410 u32 flags;
57411 u32 bitset;
57412 diff -urNp linux-2.6.32.41/include/linux/tty.h linux-2.6.32.41/include/linux/tty.h
57413 --- linux-2.6.32.41/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57414 +++ linux-2.6.32.41/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57415 @@ -13,6 +13,7 @@
57416 #include <linux/tty_driver.h>
57417 #include <linux/tty_ldisc.h>
57418 #include <linux/mutex.h>
57419 +#include <linux/poll.h>
57420
57421 #include <asm/system.h>
57422
57423 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57424 extern dev_t tty_devnum(struct tty_struct *tty);
57425 extern void proc_clear_tty(struct task_struct *p);
57426 extern struct tty_struct *get_current_tty(void);
57427 -extern void tty_default_fops(struct file_operations *fops);
57428 extern struct tty_struct *alloc_tty_struct(void);
57429 extern void free_tty_struct(struct tty_struct *tty);
57430 extern void initialize_tty_struct(struct tty_struct *tty,
57431 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57432 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57433 extern void tty_ldisc_enable(struct tty_struct *tty);
57434
57435 +/* tty_io.c */
57436 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57437 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57438 +extern unsigned int tty_poll(struct file *, poll_table *);
57439 +#ifdef CONFIG_COMPAT
57440 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57441 + unsigned long arg);
57442 +#else
57443 +#define tty_compat_ioctl NULL
57444 +#endif
57445 +extern int tty_release(struct inode *, struct file *);
57446 +extern int tty_fasync(int fd, struct file *filp, int on);
57447
57448 /* n_tty.c */
57449 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57450 diff -urNp linux-2.6.32.41/include/linux/tty_ldisc.h linux-2.6.32.41/include/linux/tty_ldisc.h
57451 --- linux-2.6.32.41/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57452 +++ linux-2.6.32.41/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57453 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57454
57455 struct module *owner;
57456
57457 - int refcount;
57458 + atomic_t refcount;
57459 };
57460
57461 struct tty_ldisc {
57462 diff -urNp linux-2.6.32.41/include/linux/types.h linux-2.6.32.41/include/linux/types.h
57463 --- linux-2.6.32.41/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57464 +++ linux-2.6.32.41/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57465 @@ -191,10 +191,26 @@ typedef struct {
57466 volatile int counter;
57467 } atomic_t;
57468
57469 +#ifdef CONFIG_PAX_REFCOUNT
57470 +typedef struct {
57471 + volatile int counter;
57472 +} atomic_unchecked_t;
57473 +#else
57474 +typedef atomic_t atomic_unchecked_t;
57475 +#endif
57476 +
57477 #ifdef CONFIG_64BIT
57478 typedef struct {
57479 volatile long counter;
57480 } atomic64_t;
57481 +
57482 +#ifdef CONFIG_PAX_REFCOUNT
57483 +typedef struct {
57484 + volatile long counter;
57485 +} atomic64_unchecked_t;
57486 +#else
57487 +typedef atomic64_t atomic64_unchecked_t;
57488 +#endif
57489 #endif
57490
57491 struct ustat {
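The types.h hunk above adds atomic_unchecked_t and atomic64_unchecked_t. Under CONFIG_PAX_REFCOUNT the ordinary atomic_t operations detect signed overflow and refuse to wrap, so a leaked reference cannot roll a refcount back around; counters that are merely statistics (allochit/allocmiss, the rdma_stat_* and vm_stat arrays, sk_drops and the other fields converted throughout this patch) move to the unchecked variant and keep the old wrapping behaviour. The sketch below is a rough single-threaded model of that split, not the kernel's asm implementation.

#include <stdio.h>
#include <limits.h>

/* Rough model only: the real atomic_inc() performs the overflow check
 * atomically in architecture-specific asm. */
typedef struct { volatile int counter; } demo_atomic_t;
typedef struct { volatile int counter; } demo_atomic_unchecked_t;

static void demo_atomic_inc(demo_atomic_t *v)
{
        if (v->counter == INT_MAX) {
                fprintf(stderr, "refcount overflow detected, saturating\n");
                return;                 /* refuse to wrap */
        }
        v->counter++;
}

static void demo_atomic_inc_unchecked(demo_atomic_unchecked_t *v)
{
        /* statistics are allowed to wrap; go through unsigned arithmetic
         * so the demo itself avoids signed-overflow undefined behaviour */
        v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
        demo_atomic_t ref = { INT_MAX };            /* a refcount */
        demo_atomic_unchecked_t hits = { INT_MAX }; /* a statistic */

        demo_atomic_inc(&ref);            /* caught, stays at INT_MAX */
        demo_atomic_inc_unchecked(&hits); /* wraps (INT_MIN on usual ABIs) */
        printf("ref=%d hits=%d\n", ref.counter, hits.counter);
        return 0;
}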
57492 diff -urNp linux-2.6.32.41/include/linux/uaccess.h linux-2.6.32.41/include/linux/uaccess.h
57493 --- linux-2.6.32.41/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57494 +++ linux-2.6.32.41/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57495 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57496 long ret; \
57497 mm_segment_t old_fs = get_fs(); \
57498 \
57499 - set_fs(KERNEL_DS); \
57500 pagefault_disable(); \
57501 + set_fs(KERNEL_DS); \
57502 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57503 - pagefault_enable(); \
57504 set_fs(old_fs); \
57505 + pagefault_enable(); \
57506 ret; \
57507 })
57508
57509 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57510 * Safely read from address @src to the buffer at @dst. If a kernel fault
57511 * happens, handle that and return -EFAULT.
57512 */
57513 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57514 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57515
57516 /*
57517 * probe_kernel_write(): safely attempt to write to a location
57518 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57519 * Safely write to address @dst from the buffer at @src. If a kernel fault
57520 * happens, handle that and return -EFAULT.
57521 */
57522 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57523 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57524
57525 #endif /* __LINUX_UACCESS_H__ */
57526 diff -urNp linux-2.6.32.41/include/linux/unaligned/access_ok.h linux-2.6.32.41/include/linux/unaligned/access_ok.h
57527 --- linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57528 +++ linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57529 @@ -6,32 +6,32 @@
57530
57531 static inline u16 get_unaligned_le16(const void *p)
57532 {
57533 - return le16_to_cpup((__le16 *)p);
57534 + return le16_to_cpup((const __le16 *)p);
57535 }
57536
57537 static inline u32 get_unaligned_le32(const void *p)
57538 {
57539 - return le32_to_cpup((__le32 *)p);
57540 + return le32_to_cpup((const __le32 *)p);
57541 }
57542
57543 static inline u64 get_unaligned_le64(const void *p)
57544 {
57545 - return le64_to_cpup((__le64 *)p);
57546 + return le64_to_cpup((const __le64 *)p);
57547 }
57548
57549 static inline u16 get_unaligned_be16(const void *p)
57550 {
57551 - return be16_to_cpup((__be16 *)p);
57552 + return be16_to_cpup((const __be16 *)p);
57553 }
57554
57555 static inline u32 get_unaligned_be32(const void *p)
57556 {
57557 - return be32_to_cpup((__be32 *)p);
57558 + return be32_to_cpup((const __be32 *)p);
57559 }
57560
57561 static inline u64 get_unaligned_be64(const void *p)
57562 {
57563 - return be64_to_cpup((__be64 *)p);
57564 + return be64_to_cpup((const __be64 *)p);
57565 }
57566
57567 static inline void put_unaligned_le16(u16 val, void *p)
57568 diff -urNp linux-2.6.32.41/include/linux/vmalloc.h linux-2.6.32.41/include/linux/vmalloc.h
57569 --- linux-2.6.32.41/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57570 +++ linux-2.6.32.41/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57571 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57572 #define VM_MAP 0x00000004 /* vmap()ed pages */
57573 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57574 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57575 +
57576 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57577 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57578 +#endif
57579 +
57580 /* bits [20..32] reserved for arch specific ioremap internals */
57581
57582 /*
57583 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57584
57585 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57586
57587 +#define vmalloc(x) \
57588 +({ \
57589 + void *___retval; \
57590 + intoverflow_t ___x = (intoverflow_t)x; \
57591 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57592 + ___retval = NULL; \
57593 + else \
57594 + ___retval = vmalloc((unsigned long)___x); \
57595 + ___retval; \
57596 +})
57597 +
57598 +#define __vmalloc(x, y, z) \
57599 +({ \
57600 + void *___retval; \
57601 + intoverflow_t ___x = (intoverflow_t)x; \
57602 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57603 + ___retval = NULL; \
57604 + else \
57605 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57606 + ___retval; \
57607 +})
57608 +
57609 +#define vmalloc_user(x) \
57610 +({ \
57611 + void *___retval; \
57612 + intoverflow_t ___x = (intoverflow_t)x; \
57613 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57614 + ___retval = NULL; \
57615 + else \
57616 + ___retval = vmalloc_user((unsigned long)___x); \
57617 + ___retval; \
57618 +})
57619 +
57620 +#define vmalloc_exec(x) \
57621 +({ \
57622 + void *___retval; \
57623 + intoverflow_t ___x = (intoverflow_t)x; \
57624 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57625 + ___retval = NULL; \
57626 + else \
57627 + ___retval = vmalloc_exec((unsigned long)___x); \
57628 + ___retval; \
57629 +})
57630 +
57631 +#define vmalloc_node(x, y) \
57632 +({ \
57633 + void *___retval; \
57634 + intoverflow_t ___x = (intoverflow_t)x; \
57635 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57636 + ___retval = NULL; \
57637 + else \
57638 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57639 + ___retval; \
57640 +})
57641 +
57642 +#define vmalloc_32(x) \
57643 +({ \
57644 + void *___retval; \
57645 + intoverflow_t ___x = (intoverflow_t)x; \
57646 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57647 + ___retval = NULL; \
57648 + else \
57649 + ___retval = vmalloc_32((unsigned long)___x); \
57650 + ___retval; \
57651 +})
57652 +
57653 +#define vmalloc_32_user(x) \
57654 +({ \
57655 + void *___retval; \
57656 + intoverflow_t ___x = (intoverflow_t)x; \
57657 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57658 + ___retval = NULL; \
57659 + else \
57660 + ___retval = vmalloc_32_user((unsigned long)___x);\
57661 + ___retval; \
57662 +})
57663 +
57664 #endif /* _LINUX_VMALLOC_H */
57665 diff -urNp linux-2.6.32.41/include/linux/vmstat.h linux-2.6.32.41/include/linux/vmstat.h
57666 --- linux-2.6.32.41/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57667 +++ linux-2.6.32.41/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57668 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57669 /*
57670 * Zone based page accounting with per cpu differentials.
57671 */
57672 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57673 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57674
57675 static inline void zone_page_state_add(long x, struct zone *zone,
57676 enum zone_stat_item item)
57677 {
57678 - atomic_long_add(x, &zone->vm_stat[item]);
57679 - atomic_long_add(x, &vm_stat[item]);
57680 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57681 + atomic_long_add_unchecked(x, &vm_stat[item]);
57682 }
57683
57684 static inline unsigned long global_page_state(enum zone_stat_item item)
57685 {
57686 - long x = atomic_long_read(&vm_stat[item]);
57687 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57688 #ifdef CONFIG_SMP
57689 if (x < 0)
57690 x = 0;
57691 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57692 static inline unsigned long zone_page_state(struct zone *zone,
57693 enum zone_stat_item item)
57694 {
57695 - long x = atomic_long_read(&zone->vm_stat[item]);
57696 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57697 #ifdef CONFIG_SMP
57698 if (x < 0)
57699 x = 0;
57700 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57701 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57702 enum zone_stat_item item)
57703 {
57704 - long x = atomic_long_read(&zone->vm_stat[item]);
57705 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57706
57707 #ifdef CONFIG_SMP
57708 int cpu;
57709 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57710
57711 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57712 {
57713 - atomic_long_inc(&zone->vm_stat[item]);
57714 - atomic_long_inc(&vm_stat[item]);
57715 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57716 + atomic_long_inc_unchecked(&vm_stat[item]);
57717 }
57718
57719 static inline void __inc_zone_page_state(struct page *page,
57720 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57721
57722 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57723 {
57724 - atomic_long_dec(&zone->vm_stat[item]);
57725 - atomic_long_dec(&vm_stat[item]);
57726 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57727 + atomic_long_dec_unchecked(&vm_stat[item]);
57728 }
57729
57730 static inline void __dec_zone_page_state(struct page *page,
57731 diff -urNp linux-2.6.32.41/include/media/v4l2-device.h linux-2.6.32.41/include/media/v4l2-device.h
57732 --- linux-2.6.32.41/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57733 +++ linux-2.6.32.41/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57734 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57735 this function returns 0. If the name ends with a digit (e.g. cx18),
57736 then the name will be set to cx18-0 since cx180 looks really odd. */
57737 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57738 - atomic_t *instance);
57739 + atomic_unchecked_t *instance);
57740
57741 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57742 Since the parent disappears this ensures that v4l2_dev doesn't have an
57743 diff -urNp linux-2.6.32.41/include/net/flow.h linux-2.6.32.41/include/net/flow.h
57744 --- linux-2.6.32.41/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57745 +++ linux-2.6.32.41/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57746 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57747 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57748 u8 dir, flow_resolve_t resolver);
57749 extern void flow_cache_flush(void);
57750 -extern atomic_t flow_cache_genid;
57751 +extern atomic_unchecked_t flow_cache_genid;
57752
57753 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57754 {
57755 diff -urNp linux-2.6.32.41/include/net/inetpeer.h linux-2.6.32.41/include/net/inetpeer.h
57756 --- linux-2.6.32.41/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57757 +++ linux-2.6.32.41/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57758 @@ -24,7 +24,7 @@ struct inet_peer
57759 __u32 dtime; /* the time of last use of not
57760 * referenced entries */
57761 atomic_t refcnt;
57762 - atomic_t rid; /* Frag reception counter */
57763 + atomic_unchecked_t rid; /* Frag reception counter */
57764 __u32 tcp_ts;
57765 unsigned long tcp_ts_stamp;
57766 };
57767 diff -urNp linux-2.6.32.41/include/net/ip_vs.h linux-2.6.32.41/include/net/ip_vs.h
57768 --- linux-2.6.32.41/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57769 +++ linux-2.6.32.41/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57770 @@ -365,7 +365,7 @@ struct ip_vs_conn {
57771 struct ip_vs_conn *control; /* Master control connection */
57772 atomic_t n_control; /* Number of controlled ones */
57773 struct ip_vs_dest *dest; /* real server */
57774 - atomic_t in_pkts; /* incoming packet counter */
57775 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57776
57777 /* packet transmitter for different forwarding methods. If it
57778 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57779 @@ -466,7 +466,7 @@ struct ip_vs_dest {
57780 union nf_inet_addr addr; /* IP address of the server */
57781 __be16 port; /* port number of the server */
57782 volatile unsigned flags; /* dest status flags */
57783 - atomic_t conn_flags; /* flags to copy to conn */
57784 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57785 atomic_t weight; /* server weight */
57786
57787 atomic_t refcnt; /* reference counter */
57788 diff -urNp linux-2.6.32.41/include/net/irda/ircomm_tty.h linux-2.6.32.41/include/net/irda/ircomm_tty.h
57789 --- linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57790 +++ linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57791 @@ -35,6 +35,7 @@
57792 #include <linux/termios.h>
57793 #include <linux/timer.h>
57794 #include <linux/tty.h> /* struct tty_struct */
57795 +#include <asm/local.h>
57796
57797 #include <net/irda/irias_object.h>
57798 #include <net/irda/ircomm_core.h>
57799 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57800 unsigned short close_delay;
57801 unsigned short closing_wait; /* time to wait before closing */
57802
57803 - int open_count;
57804 - int blocked_open; /* # of blocked opens */
57805 + local_t open_count;
57806 + local_t blocked_open; /* # of blocked opens */
57807
57808 /* Protect concurent access to :
57809 * o self->open_count
57810 diff -urNp linux-2.6.32.41/include/net/iucv/af_iucv.h linux-2.6.32.41/include/net/iucv/af_iucv.h
57811 --- linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57812 +++ linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57813 @@ -87,7 +87,7 @@ struct iucv_sock {
57814 struct iucv_sock_list {
57815 struct hlist_head head;
57816 rwlock_t lock;
57817 - atomic_t autobind_name;
57818 + atomic_unchecked_t autobind_name;
57819 };
57820
57821 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57822 diff -urNp linux-2.6.32.41/include/net/neighbour.h linux-2.6.32.41/include/net/neighbour.h
57823 --- linux-2.6.32.41/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57824 +++ linux-2.6.32.41/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57825 @@ -125,12 +125,12 @@ struct neighbour
57826 struct neigh_ops
57827 {
57828 int family;
57829 - void (*solicit)(struct neighbour *, struct sk_buff*);
57830 - void (*error_report)(struct neighbour *, struct sk_buff*);
57831 - int (*output)(struct sk_buff*);
57832 - int (*connected_output)(struct sk_buff*);
57833 - int (*hh_output)(struct sk_buff*);
57834 - int (*queue_xmit)(struct sk_buff*);
57835 + void (* const solicit)(struct neighbour *, struct sk_buff*);
57836 + void (* const error_report)(struct neighbour *, struct sk_buff*);
57837 + int (* const output)(struct sk_buff*);
57838 + int (* const connected_output)(struct sk_buff*);
57839 + int (* const hh_output)(struct sk_buff*);
57840 + int (* const queue_xmit)(struct sk_buff*);
57841 };
57842
57843 struct pneigh_entry
57844 diff -urNp linux-2.6.32.41/include/net/netlink.h linux-2.6.32.41/include/net/netlink.h
57845 --- linux-2.6.32.41/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57846 +++ linux-2.6.32.41/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57847 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57848 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57849 {
57850 if (mark)
57851 - skb_trim(skb, (unsigned char *) mark - skb->data);
57852 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57853 }
57854
57855 /**
57856 diff -urNp linux-2.6.32.41/include/net/netns/ipv4.h linux-2.6.32.41/include/net/netns/ipv4.h
57857 --- linux-2.6.32.41/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57858 +++ linux-2.6.32.41/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57859 @@ -54,7 +54,7 @@ struct netns_ipv4 {
57860 int current_rt_cache_rebuild_count;
57861
57862 struct timer_list rt_secret_timer;
57863 - atomic_t rt_genid;
57864 + atomic_unchecked_t rt_genid;
57865
57866 #ifdef CONFIG_IP_MROUTE
57867 struct sock *mroute_sk;
57868 diff -urNp linux-2.6.32.41/include/net/sctp/sctp.h linux-2.6.32.41/include/net/sctp/sctp.h
57869 --- linux-2.6.32.41/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57870 +++ linux-2.6.32.41/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57871 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57872
57873 #else /* SCTP_DEBUG */
57874
57875 -#define SCTP_DEBUG_PRINTK(whatever...)
57876 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57877 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57878 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57879 #define SCTP_ENABLE_DEBUG
57880 #define SCTP_DISABLE_DEBUG
57881 #define SCTP_ASSERT(expr, str, func)
57882 diff -urNp linux-2.6.32.41/include/net/sock.h linux-2.6.32.41/include/net/sock.h
57883 --- linux-2.6.32.41/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57884 +++ linux-2.6.32.41/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57885 @@ -272,7 +272,7 @@ struct sock {
57886 rwlock_t sk_callback_lock;
57887 int sk_err,
57888 sk_err_soft;
57889 - atomic_t sk_drops;
57890 + atomic_unchecked_t sk_drops;
57891 unsigned short sk_ack_backlog;
57892 unsigned short sk_max_ack_backlog;
57893 __u32 sk_priority;
57894 diff -urNp linux-2.6.32.41/include/net/tcp.h linux-2.6.32.41/include/net/tcp.h
57895 --- linux-2.6.32.41/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
57896 +++ linux-2.6.32.41/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
57897 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
57898 struct tcp_seq_afinfo {
57899 char *name;
57900 sa_family_t family;
57901 + /* cannot be const */
57902 struct file_operations seq_fops;
57903 struct seq_operations seq_ops;
57904 };
57905 diff -urNp linux-2.6.32.41/include/net/udp.h linux-2.6.32.41/include/net/udp.h
57906 --- linux-2.6.32.41/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
57907 +++ linux-2.6.32.41/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
57908 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
57909 char *name;
57910 sa_family_t family;
57911 struct udp_table *udp_table;
57912 + /* cannot be const */
57913 struct file_operations seq_fops;
57914 struct seq_operations seq_ops;
57915 };
57916 diff -urNp linux-2.6.32.41/include/scsi/scsi_device.h linux-2.6.32.41/include/scsi/scsi_device.h
57917 --- linux-2.6.32.41/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
57918 +++ linux-2.6.32.41/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
57919 @@ -156,9 +156,9 @@ struct scsi_device {
57920 unsigned int max_device_blocked; /* what device_blocked counts down from */
57921 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57922
57923 - atomic_t iorequest_cnt;
57924 - atomic_t iodone_cnt;
57925 - atomic_t ioerr_cnt;
57926 + atomic_unchecked_t iorequest_cnt;
57927 + atomic_unchecked_t iodone_cnt;
57928 + atomic_unchecked_t ioerr_cnt;
57929
57930 struct device sdev_gendev,
57931 sdev_dev;
57932 diff -urNp linux-2.6.32.41/include/sound/ac97_codec.h linux-2.6.32.41/include/sound/ac97_codec.h
57933 --- linux-2.6.32.41/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
57934 +++ linux-2.6.32.41/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
57935 @@ -419,15 +419,15 @@
57936 struct snd_ac97;
57937
57938 struct snd_ac97_build_ops {
57939 - int (*build_3d) (struct snd_ac97 *ac97);
57940 - int (*build_specific) (struct snd_ac97 *ac97);
57941 - int (*build_spdif) (struct snd_ac97 *ac97);
57942 - int (*build_post_spdif) (struct snd_ac97 *ac97);
57943 + int (* const build_3d) (struct snd_ac97 *ac97);
57944 + int (* const build_specific) (struct snd_ac97 *ac97);
57945 + int (* const build_spdif) (struct snd_ac97 *ac97);
57946 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
57947 #ifdef CONFIG_PM
57948 - void (*suspend) (struct snd_ac97 *ac97);
57949 - void (*resume) (struct snd_ac97 *ac97);
57950 + void (* const suspend) (struct snd_ac97 *ac97);
57951 + void (* const resume) (struct snd_ac97 *ac97);
57952 #endif
57953 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57954 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57955 };
57956
57957 struct snd_ac97_bus_ops {
57958 @@ -477,7 +477,7 @@ struct snd_ac97_template {
57959
57960 struct snd_ac97 {
57961 /* -- lowlevel (hardware) driver specific -- */
57962 - struct snd_ac97_build_ops * build_ops;
57963 + const struct snd_ac97_build_ops * build_ops;
57964 void *private_data;
57965 void (*private_free) (struct snd_ac97 *ac97);
57966 /* --- */
57967 diff -urNp linux-2.6.32.41/include/sound/ymfpci.h linux-2.6.32.41/include/sound/ymfpci.h
57968 --- linux-2.6.32.41/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
57969 +++ linux-2.6.32.41/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
57970 @@ -358,7 +358,7 @@ struct snd_ymfpci {
57971 spinlock_t reg_lock;
57972 spinlock_t voice_lock;
57973 wait_queue_head_t interrupt_sleep;
57974 - atomic_t interrupt_sleep_count;
57975 + atomic_unchecked_t interrupt_sleep_count;
57976 struct snd_info_entry *proc_entry;
57977 const struct firmware *dsp_microcode;
57978 const struct firmware *controller_microcode;
57979 diff -urNp linux-2.6.32.41/include/trace/events/irq.h linux-2.6.32.41/include/trace/events/irq.h
57980 --- linux-2.6.32.41/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
57981 +++ linux-2.6.32.41/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
57982 @@ -34,7 +34,7 @@
57983 */
57984 TRACE_EVENT(irq_handler_entry,
57985
57986 - TP_PROTO(int irq, struct irqaction *action),
57987 + TP_PROTO(int irq, const struct irqaction *action),
57988
57989 TP_ARGS(irq, action),
57990
57991 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
57992 */
57993 TRACE_EVENT(irq_handler_exit,
57994
57995 - TP_PROTO(int irq, struct irqaction *action, int ret),
57996 + TP_PROTO(int irq, const struct irqaction *action, int ret),
57997
57998 TP_ARGS(irq, action, ret),
57999
58000 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58001 */
58002 TRACE_EVENT(softirq_entry,
58003
58004 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58005 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58006
58007 TP_ARGS(h, vec),
58008
58009 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58010 */
58011 TRACE_EVENT(softirq_exit,
58012
58013 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58014 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58015
58016 TP_ARGS(h, vec),
58017
58018 diff -urNp linux-2.6.32.41/include/video/uvesafb.h linux-2.6.32.41/include/video/uvesafb.h
58019 --- linux-2.6.32.41/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58020 +++ linux-2.6.32.41/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58021 @@ -177,6 +177,7 @@ struct uvesafb_par {
58022 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58023 u8 pmi_setpal; /* PMI for palette changes */
58024 u16 *pmi_base; /* protected mode interface location */
58025 + u8 *pmi_code; /* protected mode code location */
58026 void *pmi_start;
58027 void *pmi_pal;
58028 u8 *vbe_state_orig; /*
58029 diff -urNp linux-2.6.32.41/init/do_mounts.c linux-2.6.32.41/init/do_mounts.c
58030 --- linux-2.6.32.41/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58031 +++ linux-2.6.32.41/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58032 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58033
58034 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58035 {
58036 - int err = sys_mount(name, "/root", fs, flags, data);
58037 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58038 if (err)
58039 return err;
58040
58041 - sys_chdir("/root");
58042 + sys_chdir((__force const char __user *)"/root");
58043 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58044 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58045 current->fs->pwd.mnt->mnt_sb->s_type->name,
58046 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58047 va_start(args, fmt);
58048 vsprintf(buf, fmt, args);
58049 va_end(args);
58050 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58051 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58052 if (fd >= 0) {
58053 sys_ioctl(fd, FDEJECT, 0);
58054 sys_close(fd);
58055 }
58056 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58057 - fd = sys_open("/dev/console", O_RDWR, 0);
58058 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58059 if (fd >= 0) {
58060 sys_ioctl(fd, TCGETS, (long)&termios);
58061 termios.c_lflag &= ~ICANON;
58062 sys_ioctl(fd, TCSETSF, (long)&termios);
58063 - sys_read(fd, &c, 1);
58064 + sys_read(fd, (char __user *)&c, 1);
58065 termios.c_lflag |= ICANON;
58066 sys_ioctl(fd, TCSETSF, (long)&termios);
58067 sys_close(fd);
58068 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58069 mount_root();
58070 out:
58071 devtmpfs_mount("dev");
58072 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58073 - sys_chroot(".");
58074 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58075 + sys_chroot((__force char __user *)".");
58076 }
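The init/do_mounts.c hunk above, and the do_mounts.h, do_mounts_initrd.c and initramfs.c hunks that follow, mostly add "(__force ... __user *)" casts where early-boot code hands kernel-space strings and buffers to sys_* entry points whose parameters are declared __user. The casts change nothing at runtime; they exist to satisfy sparse's address-space checking, with __force marking the crossing as intentional. The sketch below is a minimal model of the annotations involved; the real definitions live in include/linux/compiler.h and only take effect when sparse defines __CHECKER__.

#include <stdio.h>

#ifdef __CHECKER__                    /* defined by sparse, not by gcc */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for a syscall entry point declared with a __user parameter */
static long demo_sys_unlink(const char __user *pathname)
{
        (void)pathname;
        return 0;
}

int main(void)
{
        const char *kernel_path = "/initrd.image";  /* kernel-space pointer */

        /* without the __force cast sparse reports:
         *   incorrect type in argument 1 (different address spaces) */
        long ret = demo_sys_unlink((__force const char __user *)kernel_path);

        printf("ret=%ld\n", ret);
        return 0;
}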
58077 diff -urNp linux-2.6.32.41/init/do_mounts.h linux-2.6.32.41/init/do_mounts.h
58078 --- linux-2.6.32.41/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58079 +++ linux-2.6.32.41/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58080 @@ -15,15 +15,15 @@ extern int root_mountflags;
58081
58082 static inline int create_dev(char *name, dev_t dev)
58083 {
58084 - sys_unlink(name);
58085 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58086 + sys_unlink((__force char __user *)name);
58087 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58088 }
58089
58090 #if BITS_PER_LONG == 32
58091 static inline u32 bstat(char *name)
58092 {
58093 struct stat64 stat;
58094 - if (sys_stat64(name, &stat) != 0)
58095 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58096 return 0;
58097 if (!S_ISBLK(stat.st_mode))
58098 return 0;
58099 diff -urNp linux-2.6.32.41/init/do_mounts_initrd.c linux-2.6.32.41/init/do_mounts_initrd.c
58100 --- linux-2.6.32.41/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58101 +++ linux-2.6.32.41/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58102 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58103 sys_close(old_fd);sys_close(root_fd);
58104 sys_close(0);sys_close(1);sys_close(2);
58105 sys_setsid();
58106 - (void) sys_open("/dev/console",O_RDWR,0);
58107 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58108 (void) sys_dup(0);
58109 (void) sys_dup(0);
58110 return kernel_execve(shell, argv, envp_init);
58111 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58112 create_dev("/dev/root.old", Root_RAM0);
58113 /* mount initrd on rootfs' /root */
58114 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58115 - sys_mkdir("/old", 0700);
58116 - root_fd = sys_open("/", 0, 0);
58117 - old_fd = sys_open("/old", 0, 0);
58118 + sys_mkdir((__force const char __user *)"/old", 0700);
58119 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58120 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58121 /* move initrd over / and chdir/chroot in initrd root */
58122 - sys_chdir("/root");
58123 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58124 - sys_chroot(".");
58125 + sys_chdir((__force const char __user *)"/root");
58126 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58127 + sys_chroot((__force const char __user *)".");
58128
58129 /*
58130 * In case that a resume from disk is carried out by linuxrc or one of
58131 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58132
58133 /* move initrd to rootfs' /old */
58134 sys_fchdir(old_fd);
58135 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58136 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58137 /* switch root and cwd back to / of rootfs */
58138 sys_fchdir(root_fd);
58139 - sys_chroot(".");
58140 + sys_chroot((__force const char __user *)".");
58141 sys_close(old_fd);
58142 sys_close(root_fd);
58143
58144 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58145 - sys_chdir("/old");
58146 + sys_chdir((__force const char __user *)"/old");
58147 return;
58148 }
58149
58150 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58151 mount_root();
58152
58153 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58154 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58155 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58156 if (!error)
58157 printk("okay\n");
58158 else {
58159 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58160 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58161 if (error == -ENOENT)
58162 printk("/initrd does not exist. Ignored.\n");
58163 else
58164 printk("failed\n");
58165 printk(KERN_NOTICE "Unmounting old root\n");
58166 - sys_umount("/old", MNT_DETACH);
58167 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58168 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58169 if (fd < 0) {
58170 error = fd;
58171 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58172 * mounted in the normal path.
58173 */
58174 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58175 - sys_unlink("/initrd.image");
58176 + sys_unlink((__force const char __user *)"/initrd.image");
58177 handle_initrd();
58178 return 1;
58179 }
58180 }
58181 - sys_unlink("/initrd.image");
58182 + sys_unlink((__force const char __user *)"/initrd.image");
58183 return 0;
58184 }
58185 diff -urNp linux-2.6.32.41/init/do_mounts_md.c linux-2.6.32.41/init/do_mounts_md.c
58186 --- linux-2.6.32.41/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58187 +++ linux-2.6.32.41/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58188 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58189 partitioned ? "_d" : "", minor,
58190 md_setup_args[ent].device_names);
58191
58192 - fd = sys_open(name, 0, 0);
58193 + fd = sys_open((__force char __user *)name, 0, 0);
58194 if (fd < 0) {
58195 printk(KERN_ERR "md: open failed - cannot start "
58196 "array %s\n", name);
58197 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58198 * array without it
58199 */
58200 sys_close(fd);
58201 - fd = sys_open(name, 0, 0);
58202 + fd = sys_open((__force char __user *)name, 0, 0);
58203 sys_ioctl(fd, BLKRRPART, 0);
58204 }
58205 sys_close(fd);
58206 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58207
58208 wait_for_device_probe();
58209
58210 - fd = sys_open("/dev/md0", 0, 0);
58211 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58212 if (fd >= 0) {
58213 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58214 sys_close(fd);
58215 diff -urNp linux-2.6.32.41/init/initramfs.c linux-2.6.32.41/init/initramfs.c
58216 --- linux-2.6.32.41/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58217 +++ linux-2.6.32.41/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58218 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58219 }
58220 }
58221
58222 -static long __init do_utime(char __user *filename, time_t mtime)
58223 +static long __init do_utime(__force char __user *filename, time_t mtime)
58224 {
58225 struct timespec t[2];
58226
58227 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58228 struct dir_entry *de, *tmp;
58229 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58230 list_del(&de->list);
58231 - do_utime(de->name, de->mtime);
58232 + do_utime((__force char __user *)de->name, de->mtime);
58233 kfree(de->name);
58234 kfree(de);
58235 }
58236 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58237 if (nlink >= 2) {
58238 char *old = find_link(major, minor, ino, mode, collected);
58239 if (old)
58240 - return (sys_link(old, collected) < 0) ? -1 : 1;
58241 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58242 }
58243 return 0;
58244 }
58245 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58246 {
58247 struct stat st;
58248
58249 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58250 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58251 if (S_ISDIR(st.st_mode))
58252 - sys_rmdir(path);
58253 + sys_rmdir((__force char __user *)path);
58254 else
58255 - sys_unlink(path);
58256 + sys_unlink((__force char __user *)path);
58257 }
58258 }
58259
58260 @@ -305,7 +305,7 @@ static int __init do_name(void)
58261 int openflags = O_WRONLY|O_CREAT;
58262 if (ml != 1)
58263 openflags |= O_TRUNC;
58264 - wfd = sys_open(collected, openflags, mode);
58265 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58266
58267 if (wfd >= 0) {
58268 sys_fchown(wfd, uid, gid);
58269 @@ -317,17 +317,17 @@ static int __init do_name(void)
58270 }
58271 }
58272 } else if (S_ISDIR(mode)) {
58273 - sys_mkdir(collected, mode);
58274 - sys_chown(collected, uid, gid);
58275 - sys_chmod(collected, mode);
58276 + sys_mkdir((__force char __user *)collected, mode);
58277 + sys_chown((__force char __user *)collected, uid, gid);
58278 + sys_chmod((__force char __user *)collected, mode);
58279 dir_add(collected, mtime);
58280 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58281 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58282 if (maybe_link() == 0) {
58283 - sys_mknod(collected, mode, rdev);
58284 - sys_chown(collected, uid, gid);
58285 - sys_chmod(collected, mode);
58286 - do_utime(collected, mtime);
58287 + sys_mknod((__force char __user *)collected, mode, rdev);
58288 + sys_chown((__force char __user *)collected, uid, gid);
58289 + sys_chmod((__force char __user *)collected, mode);
58290 + do_utime((__force char __user *)collected, mtime);
58291 }
58292 }
58293 return 0;
58294 @@ -336,15 +336,15 @@ static int __init do_name(void)
58295 static int __init do_copy(void)
58296 {
58297 if (count >= body_len) {
58298 - sys_write(wfd, victim, body_len);
58299 + sys_write(wfd, (__force char __user *)victim, body_len);
58300 sys_close(wfd);
58301 - do_utime(vcollected, mtime);
58302 + do_utime((__force char __user *)vcollected, mtime);
58303 kfree(vcollected);
58304 eat(body_len);
58305 state = SkipIt;
58306 return 0;
58307 } else {
58308 - sys_write(wfd, victim, count);
58309 + sys_write(wfd, (__force char __user *)victim, count);
58310 body_len -= count;
58311 eat(count);
58312 return 1;
58313 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58314 {
58315 collected[N_ALIGN(name_len) + body_len] = '\0';
58316 clean_path(collected, 0);
58317 - sys_symlink(collected + N_ALIGN(name_len), collected);
58318 - sys_lchown(collected, uid, gid);
58319 - do_utime(collected, mtime);
58320 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58321 + sys_lchown((__force char __user *)collected, uid, gid);
58322 + do_utime((__force char __user *)collected, mtime);
58323 state = SkipIt;
58324 next_state = Reset;
58325 return 0;
58326 diff -urNp linux-2.6.32.41/init/Kconfig linux-2.6.32.41/init/Kconfig
58327 --- linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58328 +++ linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58329 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58330
58331 config COMPAT_BRK
58332 bool "Disable heap randomization"
58333 - default y
58334 + default n
58335 help
58336 Randomizing heap placement makes heap exploits harder, but it
58337 also breaks ancient binaries (including anything libc5 based).
58338 diff -urNp linux-2.6.32.41/init/main.c linux-2.6.32.41/init/main.c
58339 --- linux-2.6.32.41/init/main.c 2011-05-10 22:12:01.000000000 -0400
58340 +++ linux-2.6.32.41/init/main.c 2011-05-22 23:02:06.000000000 -0400
58341 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58342 #ifdef CONFIG_TC
58343 extern void tc_init(void);
58344 #endif
58345 +extern void grsecurity_init(void);
58346
58347 enum system_states system_state __read_mostly;
58348 EXPORT_SYMBOL(system_state);
58349 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58350
58351 __setup("reset_devices", set_reset_devices);
58352
58353 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58354 +extern char pax_enter_kernel_user[];
58355 +extern char pax_exit_kernel_user[];
58356 +extern pgdval_t clone_pgd_mask;
58357 +#endif
58358 +
58359 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58360 +static int __init setup_pax_nouderef(char *str)
58361 +{
58362 +#ifdef CONFIG_X86_32
58363 + unsigned int cpu;
58364 + struct desc_struct *gdt;
58365 +
58366 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58367 + gdt = get_cpu_gdt_table(cpu);
58368 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58369 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58370 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58371 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58372 + }
58373 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58374 +#else
58375 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58376 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58377 + clone_pgd_mask = ~(pgdval_t)0UL;
58378 +#endif
58379 +
58380 + return 0;
58381 +}
58382 +early_param("pax_nouderef", setup_pax_nouderef);
58383 +#endif
58384 +
58385 +#ifdef CONFIG_PAX_SOFTMODE
58386 +unsigned int pax_softmode;
58387 +
58388 +static int __init setup_pax_softmode(char *str)
58389 +{
58390 + get_option(&str, &pax_softmode);
58391 + return 1;
58392 +}
58393 +__setup("pax_softmode=", setup_pax_softmode);
58394 +#endif
58395 +
58396 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58397 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58398 static const char *panic_later, *panic_param;
58399 @@ -705,52 +749,53 @@ int initcall_debug;
58400 core_param(initcall_debug, initcall_debug, bool, 0644);
58401
58402 static char msgbuf[64];
58403 -static struct boot_trace_call call;
58404 -static struct boot_trace_ret ret;
58405 +static struct boot_trace_call trace_call;
58406 +static struct boot_trace_ret trace_ret;
58407
58408 int do_one_initcall(initcall_t fn)
58409 {
58410 int count = preempt_count();
58411 ktime_t calltime, delta, rettime;
58412 + const char *msg1 = "", *msg2 = "";
58413
58414 if (initcall_debug) {
58415 - call.caller = task_pid_nr(current);
58416 - printk("calling %pF @ %i\n", fn, call.caller);
58417 + trace_call.caller = task_pid_nr(current);
58418 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58419 calltime = ktime_get();
58420 - trace_boot_call(&call, fn);
58421 + trace_boot_call(&trace_call, fn);
58422 enable_boot_trace();
58423 }
58424
58425 - ret.result = fn();
58426 + trace_ret.result = fn();
58427
58428 if (initcall_debug) {
58429 disable_boot_trace();
58430 rettime = ktime_get();
58431 delta = ktime_sub(rettime, calltime);
58432 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58433 - trace_boot_ret(&ret, fn);
58434 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58435 + trace_boot_ret(&trace_ret, fn);
58436 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58437 - ret.result, ret.duration);
58438 + trace_ret.result, trace_ret.duration);
58439 }
58440
58441 msgbuf[0] = 0;
58442
58443 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58444 - sprintf(msgbuf, "error code %d ", ret.result);
58445 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58446 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58447
58448 if (preempt_count() != count) {
58449 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58450 + msg1 = " preemption imbalance";
58451 preempt_count() = count;
58452 }
58453 if (irqs_disabled()) {
58454 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58455 + msg2 = " disabled interrupts";
58456 local_irq_enable();
58457 }
58458 - if (msgbuf[0]) {
58459 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58460 + if (msgbuf[0] || *msg1 || *msg2) {
58461 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58462 }
58463
58464 - return ret.result;
58465 + return trace_ret.result;
58466 }
58467
58468
58469 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58470 if (!ramdisk_execute_command)
58471 ramdisk_execute_command = "/init";
58472
58473 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58474 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58475 ramdisk_execute_command = NULL;
58476 prepare_namespace();
58477 }
58478
58479 + grsecurity_init();
58480 +
58481 /*
58482 * Ok, we have completed the initial bootup, and
58483 * we're essentially up and running. Get rid of the
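
The init/main.c additions wire the PaX knobs into the kernel's normal command-line machinery: pax_nouderef is an early_param() so it can adjust the GDT and the UDEREF entry/exit stubs before the first user process exists, while pax_softmode= goes through the later __setup() path and simply parses an integer with get_option(). A generic sketch of registering such a boot parameter (names are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/kernel.h>

static int example_mode;			/* hypothetical knob */

static int __init setup_example_mode(char *str)
{
	get_option(&str, &example_mode);	/* parses "example_mode=<n>" */
	return 1;				/* non-zero: argument consumed */
}
__setup("example_mode=", setup_example_mode);
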
58484 diff -urNp linux-2.6.32.41/init/noinitramfs.c linux-2.6.32.41/init/noinitramfs.c
58485 --- linux-2.6.32.41/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58486 +++ linux-2.6.32.41/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58487 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58488 {
58489 int err;
58490
58491 - err = sys_mkdir("/dev", 0755);
58492 + err = sys_mkdir((const char __user *)"/dev", 0755);
58493 if (err < 0)
58494 goto out;
58495
58496 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58497 if (err < 0)
58498 goto out;
58499
58500 - err = sys_mkdir("/root", 0700);
58501 + err = sys_mkdir((const char __user *)"/root", 0700);
58502 if (err < 0)
58503 goto out;
58504
58505 diff -urNp linux-2.6.32.41/ipc/mqueue.c linux-2.6.32.41/ipc/mqueue.c
58506 --- linux-2.6.32.41/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58507 +++ linux-2.6.32.41/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58508 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58509 mq_bytes = (mq_msg_tblsz +
58510 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58511
58512 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58513 spin_lock(&mq_lock);
58514 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58515 u->mq_bytes + mq_bytes >
58516 diff -urNp linux-2.6.32.41/ipc/sem.c linux-2.6.32.41/ipc/sem.c
58517 --- linux-2.6.32.41/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58518 +++ linux-2.6.32.41/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58519 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58520 ushort* sem_io = fast_sem_io;
58521 int nsems;
58522
58523 + pax_track_stack();
58524 +
58525 sma = sem_lock_check(ns, semid);
58526 if (IS_ERR(sma))
58527 return PTR_ERR(sma);
58528 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58529 unsigned long jiffies_left = 0;
58530 struct ipc_namespace *ns;
58531
58532 + pax_track_stack();
58533 +
58534 ns = current->nsproxy->ipc_ns;
58535
58536 if (nsops < 1 || semid < 0)
58537 diff -urNp linux-2.6.32.41/ipc/shm.c linux-2.6.32.41/ipc/shm.c
58538 --- linux-2.6.32.41/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58539 +++ linux-2.6.32.41/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58540 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58541 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58542 #endif
58543
58544 +#ifdef CONFIG_GRKERNSEC
58545 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58546 + const time_t shm_createtime, const uid_t cuid,
58547 + const int shmid);
58548 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58549 + const time_t shm_createtime);
58550 +#endif
58551 +
58552 void shm_init_ns(struct ipc_namespace *ns)
58553 {
58554 ns->shm_ctlmax = SHMMAX;
58555 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58556 shp->shm_lprid = 0;
58557 shp->shm_atim = shp->shm_dtim = 0;
58558 shp->shm_ctim = get_seconds();
58559 +#ifdef CONFIG_GRKERNSEC
58560 + {
58561 + struct timespec timeval;
58562 + do_posix_clock_monotonic_gettime(&timeval);
58563 +
58564 + shp->shm_createtime = timeval.tv_sec;
58565 + }
58566 +#endif
58567 shp->shm_segsz = size;
58568 shp->shm_nattch = 0;
58569 shp->shm_file = file;
58570 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58571 if (err)
58572 goto out_unlock;
58573
58574 +#ifdef CONFIG_GRKERNSEC
58575 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58576 + shp->shm_perm.cuid, shmid) ||
58577 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58578 + err = -EACCES;
58579 + goto out_unlock;
58580 + }
58581 +#endif
58582 +
58583 path.dentry = dget(shp->shm_file->f_path.dentry);
58584 path.mnt = shp->shm_file->f_path.mnt;
58585 shp->shm_nattch++;
58586 +#ifdef CONFIG_GRKERNSEC
58587 + shp->shm_lapid = current->pid;
58588 +#endif
58589 size = i_size_read(path.dentry->d_inode);
58590 shm_unlock(shp);
58591
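
In do_shmat() the new gr_handle_shmat()/gr_chroot_shmat() checks run after the normal IPC permission check and can veto the attach with -EACCES; shm_lapid is also updated so the policy code knows which task attached last, and newseg() stamps shm_createtime for the same purpose. From user space a denied attach simply looks like shmat() failing; a small demo (not from the patch) of where that error would surface:

/* User-space demo, not from the patch: a denied attach shows up as
 * shmat() returning (void *)-1 with errno == EACCES. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p;

	if (id < 0) { perror("shmget"); return 1; }
	p = shmat(id, NULL, 0);
	if (p == (void *)-1)
		fprintf(stderr, "shmat: %s\n", strerror(errno));
	else
		shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
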
58592 diff -urNp linux-2.6.32.41/kernel/acct.c linux-2.6.32.41/kernel/acct.c
58593 --- linux-2.6.32.41/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58594 +++ linux-2.6.32.41/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58595 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58596 */
58597 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58598 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58599 - file->f_op->write(file, (char *)&ac,
58600 + file->f_op->write(file, (__force char __user *)&ac,
58601 sizeof(acct_t), &file->f_pos);
58602 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58603 set_fs(fs);
58604 diff -urNp linux-2.6.32.41/kernel/audit.c linux-2.6.32.41/kernel/audit.c
58605 --- linux-2.6.32.41/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58606 +++ linux-2.6.32.41/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58607 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58608 3) suppressed due to audit_rate_limit
58609 4) suppressed due to audit_backlog_limit
58610 */
58611 -static atomic_t audit_lost = ATOMIC_INIT(0);
58612 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58613
58614 /* The netlink socket. */
58615 static struct sock *audit_sock;
58616 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58617 unsigned long now;
58618 int print;
58619
58620 - atomic_inc(&audit_lost);
58621 + atomic_inc_unchecked(&audit_lost);
58622
58623 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58624
58625 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58626 printk(KERN_WARNING
58627 "audit: audit_lost=%d audit_rate_limit=%d "
58628 "audit_backlog_limit=%d\n",
58629 - atomic_read(&audit_lost),
58630 + atomic_read_unchecked(&audit_lost),
58631 audit_rate_limit,
58632 audit_backlog_limit);
58633 audit_panic(message);
58634 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58635 status_set.pid = audit_pid;
58636 status_set.rate_limit = audit_rate_limit;
58637 status_set.backlog_limit = audit_backlog_limit;
58638 - status_set.lost = atomic_read(&audit_lost);
58639 + status_set.lost = atomic_read_unchecked(&audit_lost);
58640 status_set.backlog = skb_queue_len(&audit_skb_queue);
58641 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58642 &status_set, sizeof(status_set));
58643 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58644 spin_unlock_irq(&tsk->sighand->siglock);
58645 }
58646 read_unlock(&tasklist_lock);
58647 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58648 - &s, sizeof(s));
58649 +
58650 + if (!err)
58651 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58652 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58653 break;
58654 }
58655 case AUDIT_TTY_SET: {
58656 diff -urNp linux-2.6.32.41/kernel/auditsc.c linux-2.6.32.41/kernel/auditsc.c
58657 --- linux-2.6.32.41/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58658 +++ linux-2.6.32.41/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58659 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58660 }
58661
58662 /* global counter which is incremented every time something logs in */
58663 -static atomic_t session_id = ATOMIC_INIT(0);
58664 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58665
58666 /**
58667 * audit_set_loginuid - set a task's audit_context loginuid
58668 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58669 */
58670 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58671 {
58672 - unsigned int sessionid = atomic_inc_return(&session_id);
58673 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58674 struct audit_context *context = task->audit_context;
58675
58676 if (context && context->in_syscall) {
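
audit_lost and session_id are statistics/ID counters whose wraparound is harmless, so they are moved to atomic_unchecked_t, the escape hatch from PaX's refcount-overflow detection (the same treatment appears in kernel/kgdb.c and kernel/lockdep.c below). When that detection is not configured, the unchecked variants are expected to fall straight back to the plain atomic API; a sketch of that assumption, not copied from the patch:

/* Assumed fallback when refcount overflow checking is disabled:
 * the *_unchecked names are just aliases for the ordinary atomics. */
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
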
58677 diff -urNp linux-2.6.32.41/kernel/capability.c linux-2.6.32.41/kernel/capability.c
58678 --- linux-2.6.32.41/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58679 +++ linux-2.6.32.41/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58680 @@ -305,10 +305,26 @@ int capable(int cap)
58681 BUG();
58682 }
58683
58684 - if (security_capable(cap) == 0) {
58685 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58686 current->flags |= PF_SUPERPRIV;
58687 return 1;
58688 }
58689 return 0;
58690 }
58691 +
58692 +int capable_nolog(int cap)
58693 +{
58694 + if (unlikely(!cap_valid(cap))) {
58695 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58696 + BUG();
58697 + }
58698 +
58699 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58700 + current->flags |= PF_SUPERPRIV;
58701 + return 1;
58702 + }
58703 + return 0;
58704 +}
58705 +
58706 EXPORT_SYMBOL(capable);
58707 +EXPORT_SYMBOL(capable_nolog);
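
capable() now also consults gr_is_capable(), which can log the denial; capable_nolog()/gr_is_capable_nolog() form the quiet pair for callers that merely probe for a privilege and treat a refusal as an ordinary outcome. A hedged usage sketch (the caller name is illustrative, not from the patch):

/* Illustrative caller: an informational probe prefers the non-logging
 * variant so an expected "no" does not generate audit noise. */
static int can_tune_kernel_settings(void)
{
	return capable_nolog(CAP_SYS_ADMIN);
}
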
58708 diff -urNp linux-2.6.32.41/kernel/cgroup.c linux-2.6.32.41/kernel/cgroup.c
58709 --- linux-2.6.32.41/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58710 +++ linux-2.6.32.41/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58711 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58712 struct hlist_head *hhead;
58713 struct cg_cgroup_link *link;
58714
58715 + pax_track_stack();
58716 +
58717 /* First see if we already have a cgroup group that matches
58718 * the desired set */
58719 read_lock(&css_set_lock);
58720 diff -urNp linux-2.6.32.41/kernel/configs.c linux-2.6.32.41/kernel/configs.c
58721 --- linux-2.6.32.41/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58722 +++ linux-2.6.32.41/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58723 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58724 struct proc_dir_entry *entry;
58725
58726 /* create the current config file */
58727 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58728 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58729 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58730 + &ikconfig_file_ops);
58731 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58732 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58733 + &ikconfig_file_ops);
58734 +#endif
58735 +#else
58736 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58737 &ikconfig_file_ops);
58738 +#endif
58739 +
58740 if (!entry)
58741 return -ENOMEM;
58742
58743 diff -urNp linux-2.6.32.41/kernel/cpu.c linux-2.6.32.41/kernel/cpu.c
58744 --- linux-2.6.32.41/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58745 +++ linux-2.6.32.41/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58746 @@ -19,7 +19,7 @@
58747 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58748 static DEFINE_MUTEX(cpu_add_remove_lock);
58749
58750 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58751 +static RAW_NOTIFIER_HEAD(cpu_chain);
58752
58753 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58754 * Should always be manipulated under cpu_add_remove_lock
58755 diff -urNp linux-2.6.32.41/kernel/cred.c linux-2.6.32.41/kernel/cred.c
58756 --- linux-2.6.32.41/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58757 +++ linux-2.6.32.41/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58758 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58759 */
58760 void __put_cred(struct cred *cred)
58761 {
58762 + pax_track_stack();
58763 +
58764 kdebug("__put_cred(%p{%d,%d})", cred,
58765 atomic_read(&cred->usage),
58766 read_cred_subscribers(cred));
58767 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58768 {
58769 struct cred *cred;
58770
58771 + pax_track_stack();
58772 +
58773 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58774 atomic_read(&tsk->cred->usage),
58775 read_cred_subscribers(tsk->cred));
58776 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58777 {
58778 const struct cred *cred;
58779
58780 + pax_track_stack();
58781 +
58782 rcu_read_lock();
58783
58784 do {
58785 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58786 {
58787 struct cred *new;
58788
58789 + pax_track_stack();
58790 +
58791 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58792 if (!new)
58793 return NULL;
58794 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58795 const struct cred *old;
58796 struct cred *new;
58797
58798 + pax_track_stack();
58799 +
58800 validate_process_creds();
58801
58802 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58803 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58804 struct thread_group_cred *tgcred = NULL;
58805 struct cred *new;
58806
58807 + pax_track_stack();
58808 +
58809 #ifdef CONFIG_KEYS
58810 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58811 if (!tgcred)
58812 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58813 struct cred *new;
58814 int ret;
58815
58816 + pax_track_stack();
58817 +
58818 mutex_init(&p->cred_guard_mutex);
58819
58820 if (
58821 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58822 struct task_struct *task = current;
58823 const struct cred *old = task->real_cred;
58824
58825 + pax_track_stack();
58826 +
58827 kdebug("commit_creds(%p{%d,%d})", new,
58828 atomic_read(&new->usage),
58829 read_cred_subscribers(new));
58830 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58831
58832 get_cred(new); /* we will require a ref for the subj creds too */
58833
58834 + gr_set_role_label(task, new->uid, new->gid);
58835 +
58836 /* dumpability changes */
58837 if (old->euid != new->euid ||
58838 old->egid != new->egid ||
58839 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58840 */
58841 void abort_creds(struct cred *new)
58842 {
58843 + pax_track_stack();
58844 +
58845 kdebug("abort_creds(%p{%d,%d})", new,
58846 atomic_read(&new->usage),
58847 read_cred_subscribers(new));
58848 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
58849 {
58850 const struct cred *old = current->cred;
58851
58852 + pax_track_stack();
58853 +
58854 kdebug("override_creds(%p{%d,%d})", new,
58855 atomic_read(&new->usage),
58856 read_cred_subscribers(new));
58857 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58858 {
58859 const struct cred *override = current->cred;
58860
58861 + pax_track_stack();
58862 +
58863 kdebug("revert_creds(%p{%d,%d})", old,
58864 atomic_read(&old->usage),
58865 read_cred_subscribers(old));
58866 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58867 const struct cred *old;
58868 struct cred *new;
58869
58870 + pax_track_stack();
58871 +
58872 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58873 if (!new)
58874 return NULL;
58875 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58876 */
58877 int set_security_override(struct cred *new, u32 secid)
58878 {
58879 + pax_track_stack();
58880 +
58881 return security_kernel_act_as(new, secid);
58882 }
58883 EXPORT_SYMBOL(set_security_override);
58884 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58885 u32 secid;
58886 int ret;
58887
58888 + pax_track_stack();
58889 +
58890 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58891 if (ret < 0)
58892 return ret;
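
The pax_track_stack() calls added throughout kernel/cred.c (and earlier in ipc/sem.c, kernel/cgroup.c and kernel/futex.c) belong to PaX's stack-leak defence: functions with large stack frames record how deep the kernel stack has grown so the used region can be scrubbed before returning to user space. Conceptually the helper does something like the following; this is a rough sketch of the idea, not the PaX implementation, and the field name is hypothetical:

/* Conceptual sketch only.  Taking the address of a local approximates
 * the current stack pointer; the lowest value seen is remembered so it
 * can be cleared later. */
static inline void track_stack_depth(void)
{
	unsigned long sp = (unsigned long)&sp;

	if (sp < current->lowest_stack)		/* hypothetical per-task field */
		current->lowest_stack = sp;
}
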
58893 diff -urNp linux-2.6.32.41/kernel/exit.c linux-2.6.32.41/kernel/exit.c
58894 --- linux-2.6.32.41/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
58895 +++ linux-2.6.32.41/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
58896 @@ -55,6 +55,10 @@
58897 #include <asm/pgtable.h>
58898 #include <asm/mmu_context.h>
58899
58900 +#ifdef CONFIG_GRKERNSEC
58901 +extern rwlock_t grsec_exec_file_lock;
58902 +#endif
58903 +
58904 static void exit_mm(struct task_struct * tsk);
58905
58906 static void __unhash_process(struct task_struct *p)
58907 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
58908 struct task_struct *leader;
58909 int zap_leader;
58910 repeat:
58911 + gr_del_task_from_ip_table(p);
58912 +
58913 tracehook_prepare_release_task(p);
58914 /* don't need to get the RCU readlock here - the process is dead and
58915 * can't be modifying its own credentials */
58916 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
58917 {
58918 write_lock_irq(&tasklist_lock);
58919
58920 +#ifdef CONFIG_GRKERNSEC
58921 + write_lock(&grsec_exec_file_lock);
58922 + if (current->exec_file) {
58923 + fput(current->exec_file);
58924 + current->exec_file = NULL;
58925 + }
58926 + write_unlock(&grsec_exec_file_lock);
58927 +#endif
58928 +
58929 ptrace_unlink(current);
58930 /* Reparent to init */
58931 current->real_parent = current->parent = kthreadd_task;
58932 list_move_tail(&current->sibling, &current->real_parent->children);
58933
58934 + gr_set_kernel_label(current);
58935 +
58936 /* Set the exit signal to SIGCHLD so we signal init on exit */
58937 current->exit_signal = SIGCHLD;
58938
58939 @@ -397,7 +414,7 @@ int allow_signal(int sig)
58940 * know it'll be handled, so that they don't get converted to
58941 * SIGKILL or just silently dropped.
58942 */
58943 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
58944 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
58945 recalc_sigpending();
58946 spin_unlock_irq(&current->sighand->siglock);
58947 return 0;
58948 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
58949 vsnprintf(current->comm, sizeof(current->comm), name, args);
58950 va_end(args);
58951
58952 +#ifdef CONFIG_GRKERNSEC
58953 + write_lock(&grsec_exec_file_lock);
58954 + if (current->exec_file) {
58955 + fput(current->exec_file);
58956 + current->exec_file = NULL;
58957 + }
58958 + write_unlock(&grsec_exec_file_lock);
58959 +#endif
58960 +
58961 + gr_set_kernel_label(current);
58962 +
58963 /*
58964 * If we were started as result of loading a module, close all of the
58965 * user space pages. We don't need them, and if we didn't close them
58966 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
58967 struct task_struct *tsk = current;
58968 int group_dead;
58969
58970 - profile_task_exit(tsk);
58971 -
58972 - WARN_ON(atomic_read(&tsk->fs_excl));
58973 -
58974 + /*
58975 + * Check this first since set_fs() below depends on
58976 + * current_thread_info(), which we better not access when we're in
58977 + * interrupt context. Other than that, we want to do the set_fs()
58978 + * as early as possible.
58979 + */
58980 if (unlikely(in_interrupt()))
58981 panic("Aiee, killing interrupt handler!");
58982 - if (unlikely(!tsk->pid))
58983 - panic("Attempted to kill the idle task!");
58984
58985 /*
58986 - * If do_exit is called because this processes oopsed, it's possible
58987 + * If do_exit is called because this processes Oops'ed, it's possible
58988 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
58989 * continuing. Amongst other possible reasons, this is to prevent
58990 * mm_release()->clear_child_tid() from writing to a user-controlled
58991 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
58992 */
58993 set_fs(USER_DS);
58994
58995 + profile_task_exit(tsk);
58996 +
58997 + WARN_ON(atomic_read(&tsk->fs_excl));
58998 +
58999 + if (unlikely(!tsk->pid))
59000 + panic("Attempted to kill the idle task!");
59001 +
59002 tracehook_report_exit(&code);
59003
59004 validate_creds_for_do_exit(tsk);
59005 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59006 tsk->exit_code = code;
59007 taskstats_exit(tsk, group_dead);
59008
59009 + gr_acl_handle_psacct(tsk, code);
59010 + gr_acl_handle_exit();
59011 +
59012 exit_mm(tsk);
59013
59014 if (group_dead)
59015 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59016
59017 if (unlikely(wo->wo_flags & WNOWAIT)) {
59018 int exit_code = p->exit_code;
59019 - int why, status;
59020 + int why;
59021
59022 get_task_struct(p);
59023 read_unlock(&tasklist_lock);
59024 diff -urNp linux-2.6.32.41/kernel/fork.c linux-2.6.32.41/kernel/fork.c
59025 --- linux-2.6.32.41/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59026 +++ linux-2.6.32.41/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59027 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59028 *stackend = STACK_END_MAGIC; /* for overflow detection */
59029
59030 #ifdef CONFIG_CC_STACKPROTECTOR
59031 - tsk->stack_canary = get_random_int();
59032 + tsk->stack_canary = pax_get_random_long();
59033 #endif
59034
59035 /* One for us, one for whoever does the "release_task()" (usually parent) */
59036 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59037 mm->locked_vm = 0;
59038 mm->mmap = NULL;
59039 mm->mmap_cache = NULL;
59040 - mm->free_area_cache = oldmm->mmap_base;
59041 - mm->cached_hole_size = ~0UL;
59042 + mm->free_area_cache = oldmm->free_area_cache;
59043 + mm->cached_hole_size = oldmm->cached_hole_size;
59044 mm->map_count = 0;
59045 cpumask_clear(mm_cpumask(mm));
59046 mm->mm_rb = RB_ROOT;
59047 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59048 tmp->vm_flags &= ~VM_LOCKED;
59049 tmp->vm_mm = mm;
59050 tmp->vm_next = tmp->vm_prev = NULL;
59051 + tmp->vm_mirror = NULL;
59052 anon_vma_link(tmp);
59053 file = tmp->vm_file;
59054 if (file) {
59055 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59056 if (retval)
59057 goto out;
59058 }
59059 +
59060 +#ifdef CONFIG_PAX_SEGMEXEC
59061 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59062 + struct vm_area_struct *mpnt_m;
59063 +
59064 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59065 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59066 +
59067 + if (!mpnt->vm_mirror)
59068 + continue;
59069 +
59070 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59071 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59072 + mpnt->vm_mirror = mpnt_m;
59073 + } else {
59074 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59075 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59076 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59077 + mpnt->vm_mirror->vm_mirror = mpnt;
59078 + }
59079 + }
59080 + BUG_ON(mpnt_m);
59081 + }
59082 +#endif
59083 +
59084 /* a new mm has just been created */
59085 arch_dup_mmap(oldmm, mm);
59086 retval = 0;
59087 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59088 write_unlock(&fs->lock);
59089 return -EAGAIN;
59090 }
59091 - fs->users++;
59092 + atomic_inc(&fs->users);
59093 write_unlock(&fs->lock);
59094 return 0;
59095 }
59096 tsk->fs = copy_fs_struct(fs);
59097 if (!tsk->fs)
59098 return -ENOMEM;
59099 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59100 return 0;
59101 }
59102
59103 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59104 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59105 #endif
59106 retval = -EAGAIN;
59107 +
59108 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59109 +
59110 if (atomic_read(&p->real_cred->user->processes) >=
59111 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59112 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59113 - p->real_cred->user != INIT_USER)
59114 + if (p->real_cred->user != INIT_USER &&
59115 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59116 goto bad_fork_free;
59117 }
59118
59119 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59120 goto bad_fork_free_pid;
59121 }
59122
59123 + gr_copy_label(p);
59124 +
59125 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59126 /*
59127 * Clear TID on mm_release()?
59128 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59129 bad_fork_free:
59130 free_task(p);
59131 fork_out:
59132 + gr_log_forkfail(retval);
59133 +
59134 return ERR_PTR(retval);
59135 }
59136
59137 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59138 if (clone_flags & CLONE_PARENT_SETTID)
59139 put_user(nr, parent_tidptr);
59140
59141 + gr_handle_brute_check();
59142 +
59143 if (clone_flags & CLONE_VFORK) {
59144 p->vfork_done = &vfork;
59145 init_completion(&vfork);
59146 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59147 return 0;
59148
59149 /* don't need lock here; in the worst case we'll do useless copy */
59150 - if (fs->users == 1)
59151 + if (atomic_read(&fs->users) == 1)
59152 return 0;
59153
59154 *new_fsp = copy_fs_struct(fs);
59155 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59156 fs = current->fs;
59157 write_lock(&fs->lock);
59158 current->fs = new_fs;
59159 - if (--fs->users)
59160 + gr_set_chroot_entries(current, &current->fs->root);
59161 + if (atomic_dec_return(&fs->users))
59162 new_fs = NULL;
59163 else
59164 new_fs = fs;
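
copy_fs() and the unshare path switch fs->users from a plain integer guarded by fs->lock to an atomic_t, so taking and dropping a reference no longer depends on the lock and the last-user decision comes straight from atomic_dec_return(). The same pattern in isolation (generic sketch, not from the patch):

/* Generic refcount sketch, not from the patch: the counter can be taken
 * and dropped without the structure's spinlock as long as "last reference
 * gone" is decided by the atomic return value. */
struct shared_thing {
	atomic_t users;
	/* ... payload ... */
};

static void get_thing(struct shared_thing *t)
{
	atomic_inc(&t->users);
}

static int put_thing(struct shared_thing *t)
{
	return atomic_dec_return(&t->users) == 0;	/* non-zero: caller tears down */
}
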
59165 diff -urNp linux-2.6.32.41/kernel/futex.c linux-2.6.32.41/kernel/futex.c
59166 --- linux-2.6.32.41/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59167 +++ linux-2.6.32.41/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59168 @@ -54,6 +54,7 @@
59169 #include <linux/mount.h>
59170 #include <linux/pagemap.h>
59171 #include <linux/syscalls.h>
59172 +#include <linux/ptrace.h>
59173 #include <linux/signal.h>
59174 #include <linux/module.h>
59175 #include <linux/magic.h>
59176 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59177 struct page *page;
59178 int err;
59179
59180 +#ifdef CONFIG_PAX_SEGMEXEC
59181 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59182 + return -EFAULT;
59183 +#endif
59184 +
59185 /*
59186 * The futex address must be "naturally" aligned.
59187 */
59188 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59189 struct futex_q q;
59190 int ret;
59191
59192 + pax_track_stack();
59193 +
59194 if (!bitset)
59195 return -EINVAL;
59196
59197 @@ -1841,7 +1849,7 @@ retry:
59198
59199 restart = &current_thread_info()->restart_block;
59200 restart->fn = futex_wait_restart;
59201 - restart->futex.uaddr = (u32 *)uaddr;
59202 + restart->futex.uaddr = uaddr;
59203 restart->futex.val = val;
59204 restart->futex.time = abs_time->tv64;
59205 restart->futex.bitset = bitset;
59206 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59207 struct futex_q q;
59208 int res, ret;
59209
59210 + pax_track_stack();
59211 +
59212 if (!bitset)
59213 return -EINVAL;
59214
59215 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59216 {
59217 struct robust_list_head __user *head;
59218 unsigned long ret;
59219 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59220 const struct cred *cred = current_cred(), *pcred;
59221 +#endif
59222
59223 if (!futex_cmpxchg_enabled)
59224 return -ENOSYS;
59225 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59226 if (!p)
59227 goto err_unlock;
59228 ret = -EPERM;
59229 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59230 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59231 + goto err_unlock;
59232 +#else
59233 pcred = __task_cred(p);
59234 if (cred->euid != pcred->euid &&
59235 cred->euid != pcred->uid &&
59236 !capable(CAP_SYS_PTRACE))
59237 goto err_unlock;
59238 +#endif
59239 head = p->robust_list;
59240 rcu_read_unlock();
59241 }
59242 @@ -2459,7 +2476,7 @@ retry:
59243 */
59244 static inline int fetch_robust_entry(struct robust_list __user **entry,
59245 struct robust_list __user * __user *head,
59246 - int *pi)
59247 + unsigned int *pi)
59248 {
59249 unsigned long uentry;
59250
59251 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59252 {
59253 u32 curval;
59254 int i;
59255 + mm_segment_t oldfs;
59256
59257 /*
59258 * This will fail and we want it. Some arch implementations do
59259 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59260 * implementation, the non functional ones will return
59261 * -ENOSYS.
59262 */
59263 + oldfs = get_fs();
59264 + set_fs(USER_DS);
59265 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59266 + set_fs(oldfs);
59267 if (curval == -EFAULT)
59268 futex_cmpxchg_enabled = 1;
59269
59270 diff -urNp linux-2.6.32.41/kernel/futex_compat.c linux-2.6.32.41/kernel/futex_compat.c
59271 --- linux-2.6.32.41/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59272 +++ linux-2.6.32.41/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59273 @@ -10,6 +10,7 @@
59274 #include <linux/compat.h>
59275 #include <linux/nsproxy.h>
59276 #include <linux/futex.h>
59277 +#include <linux/ptrace.h>
59278
59279 #include <asm/uaccess.h>
59280
59281 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59282 {
59283 struct compat_robust_list_head __user *head;
59284 unsigned long ret;
59285 - const struct cred *cred = current_cred(), *pcred;
59286 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59287 + const struct cred *cred = current_cred();
59288 + const struct cred *pcred;
59289 +#endif
59290
59291 if (!futex_cmpxchg_enabled)
59292 return -ENOSYS;
59293 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59294 if (!p)
59295 goto err_unlock;
59296 ret = -EPERM;
59297 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59298 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59299 + goto err_unlock;
59300 +#else
59301 pcred = __task_cred(p);
59302 if (cred->euid != pcred->euid &&
59303 cred->euid != pcred->uid &&
59304 !capable(CAP_SYS_PTRACE))
59305 goto err_unlock;
59306 +#endif
59307 head = p->compat_robust_list;
59308 read_unlock(&tasklist_lock);
59309 }
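
With GRKERNSEC_PROC_MEMMAP enabled, both get_robust_list() paths replace the ad-hoc euid/uid comparison with ptrace_may_access(p, PTRACE_MODE_READ), which also takes dumpability and LSM policy into account before another task's futex bookkeeping may be read. The gate on its own, with the surrounding RCU/tasklist locking and error handling trimmed (a sketch, not the patch's code):

/* Sketch: locking around the task lookup is omitted for brevity. */
static long check_target_readable(pid_t pid)
{
	struct task_struct *p = find_task_by_vpid(pid);

	if (!p || !ptrace_may_access(p, PTRACE_MODE_READ))
		return -EPERM;
	return 0;
}
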
59310 diff -urNp linux-2.6.32.41/kernel/gcov/base.c linux-2.6.32.41/kernel/gcov/base.c
59311 --- linux-2.6.32.41/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59312 +++ linux-2.6.32.41/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59313 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59314 }
59315
59316 #ifdef CONFIG_MODULES
59317 -static inline int within(void *addr, void *start, unsigned long size)
59318 -{
59319 - return ((addr >= start) && (addr < start + size));
59320 -}
59321 -
59322 /* Update list and generate events when modules are unloaded. */
59323 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59324 void *data)
59325 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59326 prev = NULL;
59327 /* Remove entries located in module from linked list. */
59328 for (info = gcov_info_head; info; info = info->next) {
59329 - if (within(info, mod->module_core, mod->core_size)) {
59330 + if (within_module_core_rw((unsigned long)info, mod)) {
59331 if (prev)
59332 prev->next = info->next;
59333 else
59334 diff -urNp linux-2.6.32.41/kernel/hrtimer.c linux-2.6.32.41/kernel/hrtimer.c
59335 --- linux-2.6.32.41/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59336 +++ linux-2.6.32.41/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59337 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59338 local_irq_restore(flags);
59339 }
59340
59341 -static void run_hrtimer_softirq(struct softirq_action *h)
59342 +static void run_hrtimer_softirq(void)
59343 {
59344 hrtimer_peek_ahead_timers();
59345 }
59346 diff -urNp linux-2.6.32.41/kernel/kallsyms.c linux-2.6.32.41/kernel/kallsyms.c
59347 --- linux-2.6.32.41/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59348 +++ linux-2.6.32.41/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59349 @@ -11,6 +11,9 @@
59350 * Changed the compression method from stem compression to "table lookup"
59351 * compression (see scripts/kallsyms.c for a more complete description)
59352 */
59353 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59354 +#define __INCLUDED_BY_HIDESYM 1
59355 +#endif
59356 #include <linux/kallsyms.h>
59357 #include <linux/module.h>
59358 #include <linux/init.h>
59359 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59360
59361 static inline int is_kernel_inittext(unsigned long addr)
59362 {
59363 + if (system_state != SYSTEM_BOOTING)
59364 + return 0;
59365 +
59366 if (addr >= (unsigned long)_sinittext
59367 && addr <= (unsigned long)_einittext)
59368 return 1;
59369 return 0;
59370 }
59371
59372 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59373 +#ifdef CONFIG_MODULES
59374 +static inline int is_module_text(unsigned long addr)
59375 +{
59376 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59377 + return 1;
59378 +
59379 + addr = ktla_ktva(addr);
59380 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59381 +}
59382 +#else
59383 +static inline int is_module_text(unsigned long addr)
59384 +{
59385 + return 0;
59386 +}
59387 +#endif
59388 +#endif
59389 +
59390 static inline int is_kernel_text(unsigned long addr)
59391 {
59392 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59393 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59394
59395 static inline int is_kernel(unsigned long addr)
59396 {
59397 +
59398 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59399 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59400 + return 1;
59401 +
59402 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59403 +#else
59404 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59405 +#endif
59406 +
59407 return 1;
59408 return in_gate_area_no_task(addr);
59409 }
59410
59411 static int is_ksym_addr(unsigned long addr)
59412 {
59413 +
59414 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59415 + if (is_module_text(addr))
59416 + return 0;
59417 +#endif
59418 +
59419 if (all_var)
59420 return is_kernel(addr);
59421
59422 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59423
59424 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59425 {
59426 - iter->name[0] = '\0';
59427 iter->nameoff = get_symbol_offset(new_pos);
59428 iter->pos = new_pos;
59429 }
59430 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59431 {
59432 struct kallsym_iter *iter = m->private;
59433
59434 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59435 + if (current_uid())
59436 + return 0;
59437 +#endif
59438 +
59439 /* Some debugging symbols have no name. Ignore them. */
59440 if (!iter->name[0])
59441 return 0;
59442 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59443 struct kallsym_iter *iter;
59444 int ret;
59445
59446 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59447 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59448 if (!iter)
59449 return -ENOMEM;
59450 reset_iter(iter, 0);
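
Under GRKERNSEC_HIDESYM, s_show() returns early for any reader with a non-zero uid, so an unprivileged open of /proc/kallsyms yields no symbol lines at all; the iterator is also allocated with kzalloc() now, which makes the explicit name[0] reset unnecessary and keeps uninitialized memory out of the seq_file output. A user-space check (not part of the patch):

/* User-space probe, not from the patch: under HIDESYM an unprivileged
 * read of /proc/kallsyms returns no symbol lines at all. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f) { perror("fopen"); return 1; }
	if (fgets(line, sizeof(line), f))
		printf("first entry: %s", line);
	else
		printf("no symbols visible to this uid\n");
	fclose(f);
	return 0;
}
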
59451 diff -urNp linux-2.6.32.41/kernel/kgdb.c linux-2.6.32.41/kernel/kgdb.c
59452 --- linux-2.6.32.41/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59453 +++ linux-2.6.32.41/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59454 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59455 /* Guard for recursive entry */
59456 static int exception_level;
59457
59458 -static struct kgdb_io *kgdb_io_ops;
59459 +static const struct kgdb_io *kgdb_io_ops;
59460 static DEFINE_SPINLOCK(kgdb_registration_lock);
59461
59462 /* kgdb console driver is loaded */
59463 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59464 */
59465 static atomic_t passive_cpu_wait[NR_CPUS];
59466 static atomic_t cpu_in_kgdb[NR_CPUS];
59467 -atomic_t kgdb_setting_breakpoint;
59468 +atomic_unchecked_t kgdb_setting_breakpoint;
59469
59470 struct task_struct *kgdb_usethread;
59471 struct task_struct *kgdb_contthread;
59472 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59473 sizeof(unsigned long)];
59474
59475 /* to keep track of the CPU which is doing the single stepping*/
59476 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59477 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59478
59479 /*
59480 * If you are debugging a problem where roundup (the collection of
59481 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59482 return 0;
59483 if (kgdb_connected)
59484 return 1;
59485 - if (atomic_read(&kgdb_setting_breakpoint))
59486 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59487 return 1;
59488 if (print_wait)
59489 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59490 @@ -1426,8 +1426,8 @@ acquirelock:
59491 * instance of the exception handler wanted to come into the
59492 * debugger on a different CPU via a single step
59493 */
59494 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59495 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59496 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59497 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59498
59499 atomic_set(&kgdb_active, -1);
59500 touch_softlockup_watchdog();
59501 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59502 *
59503 * Register it with the KGDB core.
59504 */
59505 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59506 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59507 {
59508 int err;
59509
59510 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59511 *
59512 * Unregister it with the KGDB core.
59513 */
59514 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59515 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59516 {
59517 BUG_ON(kgdb_connected);
59518
59519 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59520 */
59521 void kgdb_breakpoint(void)
59522 {
59523 - atomic_set(&kgdb_setting_breakpoint, 1);
59524 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59525 wmb(); /* Sync point before breakpoint */
59526 arch_kgdb_breakpoint();
59527 wmb(); /* Sync point after breakpoint */
59528 - atomic_set(&kgdb_setting_breakpoint, 0);
59529 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59530 }
59531 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59532
59533 diff -urNp linux-2.6.32.41/kernel/kmod.c linux-2.6.32.41/kernel/kmod.c
59534 --- linux-2.6.32.41/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59535 +++ linux-2.6.32.41/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59536 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59537 * If module auto-loading support is disabled then this function
59538 * becomes a no-operation.
59539 */
59540 -int __request_module(bool wait, const char *fmt, ...)
59541 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59542 {
59543 - va_list args;
59544 char module_name[MODULE_NAME_LEN];
59545 unsigned int max_modprobes;
59546 int ret;
59547 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59548 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59549 static char *envp[] = { "HOME=/",
59550 "TERM=linux",
59551 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59552 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59553 if (ret)
59554 return ret;
59555
59556 - va_start(args, fmt);
59557 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59558 - va_end(args);
59559 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59560 if (ret >= MODULE_NAME_LEN)
59561 return -ENAMETOOLONG;
59562
59563 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59564 + if (!current_uid()) {
59565 + /* hack to workaround consolekit/udisks stupidity */
59566 + read_lock(&tasklist_lock);
59567 + if (!strcmp(current->comm, "mount") &&
59568 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59569 + read_unlock(&tasklist_lock);
59570 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59571 + return -EPERM;
59572 + }
59573 + read_unlock(&tasklist_lock);
59574 + }
59575 +#endif
59576 +
59577 /* If modprobe needs a service that is in a module, we get a recursive
59578 * loop. Limit the number of running kmod threads to max_threads/2 or
59579 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59580 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59581 atomic_dec(&kmod_concurrent);
59582 return ret;
59583 }
59584 +
59585 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59586 +{
59587 + va_list args;
59588 + int ret;
59589 +
59590 + va_start(args, fmt);
59591 + ret = ____request_module(wait, module_param, fmt, args);
59592 + va_end(args);
59593 +
59594 + return ret;
59595 +}
59596 +
59597 +int __request_module(bool wait, const char *fmt, ...)
59598 +{
59599 + va_list args;
59600 + int ret;
59601 +
59602 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59603 + if (current_uid()) {
59604 + char module_param[MODULE_NAME_LEN];
59605 +
59606 + memset(module_param, 0, sizeof(module_param));
59607 +
59608 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59609 +
59610 + va_start(args, fmt);
59611 + ret = ____request_module(wait, module_param, fmt, args);
59612 + va_end(args);
59613 +
59614 + return ret;
59615 + }
59616 +#endif
59617 +
59618 + va_start(args, fmt);
59619 + ret = ____request_module(wait, NULL, fmt, args);
59620 + va_end(args);
59621 +
59622 + return ret;
59623 +}
59624 +
59625 +
59626 EXPORT_SYMBOL(__request_module);
59627 #endif /* CONFIG_MODULES */
59628
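
__request_module() is refactored into a va_list core, ____request_module(), so that MODHARDEN can slip an extra argument into the modprobe argv — a per-uid marker of the form grsec_modharden_normal<uid>_ for unprivileged requesters, with grsec_modharden_fs handled later in the kernel/module.c hunks — while the exported varargs entry point keeps its signature. The wrapper-around-a-va_list-core refactoring on its own (generic sketch, hypothetical names):

/* Generic sketch of splitting a printf-style API into a va_list core
 * plus thin varargs wrappers; names are illustrative. */
#include <stdarg.h>
#include <stdio.h>

static int format_name(char *buf, size_t len, const char *fmt, va_list ap)
{
	return vsnprintf(buf, len, fmt, ap);
}

int request_by_name(const char *fmt, ...)
{
	char name[64];
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = format_name(name, sizeof(name), fmt, ap);
	va_end(ap);

	return ret >= (int)sizeof(name) ? -1 : 0;	/* reject overly long names */
}
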
59629 diff -urNp linux-2.6.32.41/kernel/kprobes.c linux-2.6.32.41/kernel/kprobes.c
59630 --- linux-2.6.32.41/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59631 +++ linux-2.6.32.41/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59632 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59633 * kernel image and loaded module images reside. This is required
59634 * so x86_64 can correctly handle the %rip-relative fixups.
59635 */
59636 - kip->insns = module_alloc(PAGE_SIZE);
59637 + kip->insns = module_alloc_exec(PAGE_SIZE);
59638 if (!kip->insns) {
59639 kfree(kip);
59640 return NULL;
59641 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59642 */
59643 if (!list_is_singular(&kprobe_insn_pages)) {
59644 list_del(&kip->list);
59645 - module_free(NULL, kip->insns);
59646 + module_free_exec(NULL, kip->insns);
59647 kfree(kip);
59648 }
59649 return 1;
59650 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59651 {
59652 int i, err = 0;
59653 unsigned long offset = 0, size = 0;
59654 - char *modname, namebuf[128];
59655 + char *modname, namebuf[KSYM_NAME_LEN];
59656 const char *symbol_name;
59657 void *addr;
59658 struct kprobe_blackpoint *kb;
59659 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59660 const char *sym = NULL;
59661 unsigned int i = *(loff_t *) v;
59662 unsigned long offset = 0;
59663 - char *modname, namebuf[128];
59664 + char *modname, namebuf[KSYM_NAME_LEN];
59665
59666 head = &kprobe_table[i];
59667 preempt_disable();
59668 diff -urNp linux-2.6.32.41/kernel/lockdep.c linux-2.6.32.41/kernel/lockdep.c
59669 --- linux-2.6.32.41/kernel/lockdep.c 2011-03-27 14:31:47.000000000 -0400
59670 +++ linux-2.6.32.41/kernel/lockdep.c 2011-04-17 15:56:46.000000000 -0400
59671 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59672 /*
59673 * Various lockdep statistics:
59674 */
59675 -atomic_t chain_lookup_hits;
59676 -atomic_t chain_lookup_misses;
59677 -atomic_t hardirqs_on_events;
59678 -atomic_t hardirqs_off_events;
59679 -atomic_t redundant_hardirqs_on;
59680 -atomic_t redundant_hardirqs_off;
59681 -atomic_t softirqs_on_events;
59682 -atomic_t softirqs_off_events;
59683 -atomic_t redundant_softirqs_on;
59684 -atomic_t redundant_softirqs_off;
59685 -atomic_t nr_unused_locks;
59686 -atomic_t nr_cyclic_checks;
59687 -atomic_t nr_find_usage_forwards_checks;
59688 -atomic_t nr_find_usage_backwards_checks;
59689 +atomic_unchecked_t chain_lookup_hits;
59690 +atomic_unchecked_t chain_lookup_misses;
59691 +atomic_unchecked_t hardirqs_on_events;
59692 +atomic_unchecked_t hardirqs_off_events;
59693 +atomic_unchecked_t redundant_hardirqs_on;
59694 +atomic_unchecked_t redundant_hardirqs_off;
59695 +atomic_unchecked_t softirqs_on_events;
59696 +atomic_unchecked_t softirqs_off_events;
59697 +atomic_unchecked_t redundant_softirqs_on;
59698 +atomic_unchecked_t redundant_softirqs_off;
59699 +atomic_unchecked_t nr_unused_locks;
59700 +atomic_unchecked_t nr_cyclic_checks;
59701 +atomic_unchecked_t nr_find_usage_forwards_checks;
59702 +atomic_unchecked_t nr_find_usage_backwards_checks;
59703 #endif
59704
59705 /*
59706 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59707 int i;
59708 #endif
59709
59710 +#ifdef CONFIG_PAX_KERNEXEC
59711 + start = ktla_ktva(start);
59712 +#endif
59713 +
59714 /*
59715 * static variable?
59716 */
59717 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59718 */
59719 for_each_possible_cpu(i) {
59720 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59721 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59722 - + per_cpu_offset(i);
59723 + end = start + PERCPU_ENOUGH_ROOM;
59724
59725 if ((addr >= start) && (addr < end))
59726 return 1;
59727 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59728 if (!static_obj(lock->key)) {
59729 debug_locks_off();
59730 printk("INFO: trying to register non-static key.\n");
59731 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59732 printk("the code is fine but needs lockdep annotation.\n");
59733 printk("turning off the locking correctness validator.\n");
59734 dump_stack();
59735 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59736 if (!class)
59737 return 0;
59738 }
59739 - debug_atomic_inc((atomic_t *)&class->ops);
59740 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59741 if (very_verbose(class)) {
59742 printk("\nacquire class [%p] %s", class->key, class->name);
59743 if (class->name_version > 1)
59744 diff -urNp linux-2.6.32.41/kernel/lockdep_internals.h linux-2.6.32.41/kernel/lockdep_internals.h
59745 --- linux-2.6.32.41/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59746 +++ linux-2.6.32.41/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59747 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59748 /*
59749 * Various lockdep statistics:
59750 */
59751 -extern atomic_t chain_lookup_hits;
59752 -extern atomic_t chain_lookup_misses;
59753 -extern atomic_t hardirqs_on_events;
59754 -extern atomic_t hardirqs_off_events;
59755 -extern atomic_t redundant_hardirqs_on;
59756 -extern atomic_t redundant_hardirqs_off;
59757 -extern atomic_t softirqs_on_events;
59758 -extern atomic_t softirqs_off_events;
59759 -extern atomic_t redundant_softirqs_on;
59760 -extern atomic_t redundant_softirqs_off;
59761 -extern atomic_t nr_unused_locks;
59762 -extern atomic_t nr_cyclic_checks;
59763 -extern atomic_t nr_cyclic_check_recursions;
59764 -extern atomic_t nr_find_usage_forwards_checks;
59765 -extern atomic_t nr_find_usage_forwards_recursions;
59766 -extern atomic_t nr_find_usage_backwards_checks;
59767 -extern atomic_t nr_find_usage_backwards_recursions;
59768 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
59769 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
59770 -# define debug_atomic_read(ptr) atomic_read(ptr)
59771 +extern atomic_unchecked_t chain_lookup_hits;
59772 +extern atomic_unchecked_t chain_lookup_misses;
59773 +extern atomic_unchecked_t hardirqs_on_events;
59774 +extern atomic_unchecked_t hardirqs_off_events;
59775 +extern atomic_unchecked_t redundant_hardirqs_on;
59776 +extern atomic_unchecked_t redundant_hardirqs_off;
59777 +extern atomic_unchecked_t softirqs_on_events;
59778 +extern atomic_unchecked_t softirqs_off_events;
59779 +extern atomic_unchecked_t redundant_softirqs_on;
59780 +extern atomic_unchecked_t redundant_softirqs_off;
59781 +extern atomic_unchecked_t nr_unused_locks;
59782 +extern atomic_unchecked_t nr_cyclic_checks;
59783 +extern atomic_unchecked_t nr_cyclic_check_recursions;
59784 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
59785 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59786 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
59787 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59788 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59789 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59790 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59791 #else
59792 # define debug_atomic_inc(ptr) do { } while (0)
59793 # define debug_atomic_dec(ptr) do { } while (0)
59794 diff -urNp linux-2.6.32.41/kernel/lockdep_proc.c linux-2.6.32.41/kernel/lockdep_proc.c
59795 --- linux-2.6.32.41/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59796 +++ linux-2.6.32.41/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59797 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59798
59799 static void print_name(struct seq_file *m, struct lock_class *class)
59800 {
59801 - char str[128];
59802 + char str[KSYM_NAME_LEN];
59803 const char *name = class->name;
59804
59805 if (!name) {
59806 diff -urNp linux-2.6.32.41/kernel/module.c linux-2.6.32.41/kernel/module.c
59807 --- linux-2.6.32.41/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59808 +++ linux-2.6.32.41/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59809 @@ -55,6 +55,7 @@
59810 #include <linux/async.h>
59811 #include <linux/percpu.h>
59812 #include <linux/kmemleak.h>
59813 +#include <linux/grsecurity.h>
59814
59815 #define CREATE_TRACE_POINTS
59816 #include <trace/events/module.h>
59817 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59818 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59819
59820 /* Bounds of module allocation, for speeding __module_address */
59821 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59822 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59823 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59824
59825 int register_module_notifier(struct notifier_block * nb)
59826 {
59827 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59828 return true;
59829
59830 list_for_each_entry_rcu(mod, &modules, list) {
59831 - struct symsearch arr[] = {
59832 + struct symsearch modarr[] = {
59833 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59834 NOT_GPL_ONLY, false },
59835 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59836 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59837 #endif
59838 };
59839
59840 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59841 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59842 return true;
59843 }
59844 return false;
59845 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59846 void *ptr;
59847 int cpu;
59848
59849 - if (align > PAGE_SIZE) {
59850 + if (align-1 >= PAGE_SIZE) {
59851 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59852 name, align, PAGE_SIZE);
59853 align = PAGE_SIZE;
59854 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59855 * /sys/module/foo/sections stuff
59856 * J. Corbet <corbet@lwn.net>
59857 */
59858 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59859 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59860
59861 static inline bool sect_empty(const Elf_Shdr *sect)
59862 {
59863 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59864 destroy_params(mod->kp, mod->num_kp);
59865
59866 /* This may be NULL, but that's OK */
59867 - module_free(mod, mod->module_init);
59868 + module_free(mod, mod->module_init_rw);
59869 + module_free_exec(mod, mod->module_init_rx);
59870 kfree(mod->args);
59871 if (mod->percpu)
59872 percpu_modfree(mod->percpu);
59873 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59874 percpu_modfree(mod->refptr);
59875 #endif
59876 /* Free lock-classes: */
59877 - lockdep_free_key_range(mod->module_core, mod->core_size);
59878 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59879 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59880
59881 /* Finally, free the core (containing the module structure) */
59882 - module_free(mod, mod->module_core);
59883 + module_free_exec(mod, mod->module_core_rx);
59884 + module_free(mod, mod->module_core_rw);
59885
59886 #ifdef CONFIG_MPU
59887 update_protections(current->mm);
59888 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59889 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59890 int ret = 0;
59891 const struct kernel_symbol *ksym;
59892 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59893 + int is_fs_load = 0;
59894 + int register_filesystem_found = 0;
59895 + char *p;
59896 +
59897 + p = strstr(mod->args, "grsec_modharden_fs");
59898 +
59899 + if (p) {
59900 + char *endptr = p + strlen("grsec_modharden_fs");
59901 + /* copy \0 as well */
59902 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
59903 + is_fs_load = 1;
59904 + }
59905 +#endif
59906 +
59907
59908 for (i = 1; i < n; i++) {
59909 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59910 + const char *name = strtab + sym[i].st_name;
59911 +
59912 + /* it's a real shame this will never get ripped and copied
59913 + upstream! ;(
59914 + */
59915 + if (is_fs_load && !strcmp(name, "register_filesystem"))
59916 + register_filesystem_found = 1;
59917 +#endif
59918 switch (sym[i].st_shndx) {
59919 case SHN_COMMON:
59920 /* We compiled with -fno-common. These are not
59921 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
59922 strtab + sym[i].st_name, mod);
59923 /* Ok if resolved. */
59924 if (ksym) {
59925 + pax_open_kernel();
59926 sym[i].st_value = ksym->value;
59927 + pax_close_kernel();
59928 break;
59929 }
59930
59931 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
59932 secbase = (unsigned long)mod->percpu;
59933 else
59934 secbase = sechdrs[sym[i].st_shndx].sh_addr;
59935 + pax_open_kernel();
59936 sym[i].st_value += secbase;
59937 + pax_close_kernel();
59938 break;
59939 }
59940 }
59941
59942 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59943 + if (is_fs_load && !register_filesystem_found) {
59944 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
59945 + ret = -EPERM;
59946 + }
59947 +#endif
59948 +
59949 return ret;
59950 }
59951
59952 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
59953 || s->sh_entsize != ~0UL
59954 || strstarts(secstrings + s->sh_name, ".init"))
59955 continue;
59956 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
59957 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59958 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
59959 + else
59960 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
59961 DEBUGP("\t%s\n", secstrings + s->sh_name);
59962 }
59963 - if (m == 0)
59964 - mod->core_text_size = mod->core_size;
59965 }
59966
59967 DEBUGP("Init section allocation order:\n");
59968 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
59969 || s->sh_entsize != ~0UL
59970 || !strstarts(secstrings + s->sh_name, ".init"))
59971 continue;
59972 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
59973 - | INIT_OFFSET_MASK);
59974 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59975 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
59976 + else
59977 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
59978 + s->sh_entsize |= INIT_OFFSET_MASK;
59979 DEBUGP("\t%s\n", secstrings + s->sh_name);
59980 }
59981 - if (m == 0)
59982 - mod->init_text_size = mod->init_size;
59983 }
59984 }
59985
59986 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
59987
59988 /* As per nm */
59989 static char elf_type(const Elf_Sym *sym,
59990 - Elf_Shdr *sechdrs,
59991 - const char *secstrings,
59992 - struct module *mod)
59993 + const Elf_Shdr *sechdrs,
59994 + const char *secstrings)
59995 {
59996 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
59997 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
59998 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
59999
60000 /* Put symbol section at end of init part of module. */
60001 symsect->sh_flags |= SHF_ALLOC;
60002 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60003 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60004 symindex) | INIT_OFFSET_MASK;
60005 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60006
60007 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60008 }
60009
60010 /* Append room for core symbols at end of core part. */
60011 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60012 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60013 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60014 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60015
60016 /* Put string table section at end of init part of module. */
60017 strsect->sh_flags |= SHF_ALLOC;
60018 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60019 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60020 strindex) | INIT_OFFSET_MASK;
60021 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60022
60023 /* Append room for core symbols' strings at end of core part. */
60024 - *pstroffs = mod->core_size;
60025 + *pstroffs = mod->core_size_rx;
60026 __set_bit(0, strmap);
60027 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60028 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60029
60030 return symoffs;
60031 }
60032 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60033 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60034 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60035
60036 + pax_open_kernel();
60037 +
60038 /* Set types up while we still have access to sections. */
60039 for (i = 0; i < mod->num_symtab; i++)
60040 mod->symtab[i].st_info
60041 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60042 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60043
60044 - mod->core_symtab = dst = mod->module_core + symoffs;
60045 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60046 src = mod->symtab;
60047 *dst = *src;
60048 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60049 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60050 }
60051 mod->core_num_syms = ndst;
60052
60053 - mod->core_strtab = s = mod->module_core + stroffs;
60054 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60055 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60056 if (test_bit(i, strmap))
60057 *++s = mod->strtab[i];
60058 +
60059 + pax_close_kernel();
60060 }
60061 #else
60062 static inline unsigned long layout_symtab(struct module *mod,
60063 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60064 #endif
60065 }
60066
60067 -static void *module_alloc_update_bounds(unsigned long size)
60068 +static void *module_alloc_update_bounds_rw(unsigned long size)
60069 {
60070 void *ret = module_alloc(size);
60071
60072 if (ret) {
60073 /* Update module bounds. */
60074 - if ((unsigned long)ret < module_addr_min)
60075 - module_addr_min = (unsigned long)ret;
60076 - if ((unsigned long)ret + size > module_addr_max)
60077 - module_addr_max = (unsigned long)ret + size;
60078 + if ((unsigned long)ret < module_addr_min_rw)
60079 + module_addr_min_rw = (unsigned long)ret;
60080 + if ((unsigned long)ret + size > module_addr_max_rw)
60081 + module_addr_max_rw = (unsigned long)ret + size;
60082 + }
60083 + return ret;
60084 +}
60085 +
60086 +static void *module_alloc_update_bounds_rx(unsigned long size)
60087 +{
60088 + void *ret = module_alloc_exec(size);
60089 +
60090 + if (ret) {
60091 + /* Update module bounds. */
60092 + if ((unsigned long)ret < module_addr_min_rx)
60093 + module_addr_min_rx = (unsigned long)ret;
60094 + if ((unsigned long)ret + size > module_addr_max_rx)
60095 + module_addr_max_rx = (unsigned long)ret + size;
60096 }
60097 return ret;
60098 }
60099 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60100 unsigned int i;
60101
60102 /* only scan the sections containing data */
60103 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60104 - (unsigned long)mod->module_core,
60105 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60106 + (unsigned long)mod->module_core_rw,
60107 sizeof(struct module), GFP_KERNEL);
60108
60109 for (i = 1; i < hdr->e_shnum; i++) {
60110 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60111 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60112 continue;
60113
60114 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60115 - (unsigned long)mod->module_core,
60116 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60117 + (unsigned long)mod->module_core_rw,
60118 sechdrs[i].sh_size, GFP_KERNEL);
60119 }
60120 }
60121 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60122 secstrings, &stroffs, strmap);
60123
60124 /* Do the allocs. */
60125 - ptr = module_alloc_update_bounds(mod->core_size);
60126 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60127 /*
60128 * The pointer to this block is stored in the module structure
60129 * which is inside the block. Just mark it as not being a
60130 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60131 err = -ENOMEM;
60132 goto free_percpu;
60133 }
60134 - memset(ptr, 0, mod->core_size);
60135 - mod->module_core = ptr;
60136 + memset(ptr, 0, mod->core_size_rw);
60137 + mod->module_core_rw = ptr;
60138
60139 - ptr = module_alloc_update_bounds(mod->init_size);
60140 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60141 /*
60142 * The pointer to this block is stored in the module structure
60143 * which is inside the block. This block doesn't need to be
60144 * scanned as it contains data and code that will be freed
60145 * after the module is initialized.
60146 */
60147 - kmemleak_ignore(ptr);
60148 - if (!ptr && mod->init_size) {
60149 + kmemleak_not_leak(ptr);
60150 + if (!ptr && mod->init_size_rw) {
60151 + err = -ENOMEM;
60152 + goto free_core_rw;
60153 + }
60154 + memset(ptr, 0, mod->init_size_rw);
60155 + mod->module_init_rw = ptr;
60156 +
60157 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60158 + kmemleak_not_leak(ptr);
60159 + if (!ptr) {
60160 err = -ENOMEM;
60161 - goto free_core;
60162 + goto free_init_rw;
60163 }
60164 - memset(ptr, 0, mod->init_size);
60165 - mod->module_init = ptr;
60166 +
60167 + pax_open_kernel();
60168 + memset(ptr, 0, mod->core_size_rx);
60169 + pax_close_kernel();
60170 + mod->module_core_rx = ptr;
60171 +
60172 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60173 + kmemleak_not_leak(ptr);
60174 + if (!ptr && mod->init_size_rx) {
60175 + err = -ENOMEM;
60176 + goto free_core_rx;
60177 + }
60178 +
60179 + pax_open_kernel();
60180 + memset(ptr, 0, mod->init_size_rx);
60181 + pax_close_kernel();
60182 + mod->module_init_rx = ptr;
60183
60184 /* Transfer each section which specifies SHF_ALLOC */
60185 DEBUGP("final section addresses:\n");
60186 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60187 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60188 continue;
60189
60190 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60191 - dest = mod->module_init
60192 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60193 - else
60194 - dest = mod->module_core + sechdrs[i].sh_entsize;
60195 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60196 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60197 + dest = mod->module_init_rw
60198 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60199 + else
60200 + dest = mod->module_init_rx
60201 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60202 + } else {
60203 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60204 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60205 + else
60206 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60207 + }
60208 +
60209 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60210
60211 - if (sechdrs[i].sh_type != SHT_NOBITS)
60212 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60213 - sechdrs[i].sh_size);
60214 +#ifdef CONFIG_PAX_KERNEXEC
60215 +#ifdef CONFIG_X86_64
60216 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60217 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60218 +#endif
60219 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60220 + pax_open_kernel();
60221 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60222 + pax_close_kernel();
60223 + } else
60224 +#endif
60225 +
60226 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60227 + }
60228 /* Update sh_addr to point to copy in image. */
60229 - sechdrs[i].sh_addr = (unsigned long)dest;
60230 +
60231 +#ifdef CONFIG_PAX_KERNEXEC
60232 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60233 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60234 + else
60235 +#endif
60236 +
60237 + sechdrs[i].sh_addr = (unsigned long)dest;
60238 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60239 }
60240 /* Module has been moved. */
60241 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60242 mod->name);
60243 if (!mod->refptr) {
60244 err = -ENOMEM;
60245 - goto free_init;
60246 + goto free_init_rx;
60247 }
60248 #endif
60249 /* Now we've moved module, initialize linked lists, etc. */
60250 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60251 /* Set up MODINFO_ATTR fields */
60252 setup_modinfo(mod, sechdrs, infoindex);
60253
60254 + mod->args = args;
60255 +
60256 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60257 + {
60258 + char *p, *p2;
60259 +
60260 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60261 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60262 + err = -EPERM;
60263 + goto cleanup;
60264 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60265 + p += strlen("grsec_modharden_normal");
60266 + p2 = strstr(p, "_");
60267 + if (p2) {
60268 + *p2 = '\0';
60269 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60270 + *p2 = '_';
60271 + }
60272 + err = -EPERM;
60273 + goto cleanup;
60274 + }
60275 + }
60276 +#endif
60277 +
60278 +
60279 /* Fix up syms, so that st_value is a pointer to location. */
60280 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60281 mod);
60282 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60283
60284 /* Now do relocations. */
60285 for (i = 1; i < hdr->e_shnum; i++) {
60286 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60287 unsigned int info = sechdrs[i].sh_info;
60288 + strtab = (char *)sechdrs[strindex].sh_addr;
60289
60290 /* Not a valid relocation section? */
60291 if (info >= hdr->e_shnum)
60292 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60293 * Do it before processing of module parameters, so the module
60294 * can provide parameter accessor functions of its own.
60295 */
60296 - if (mod->module_init)
60297 - flush_icache_range((unsigned long)mod->module_init,
60298 - (unsigned long)mod->module_init
60299 - + mod->init_size);
60300 - flush_icache_range((unsigned long)mod->module_core,
60301 - (unsigned long)mod->module_core + mod->core_size);
60302 + if (mod->module_init_rx)
60303 + flush_icache_range((unsigned long)mod->module_init_rx,
60304 + (unsigned long)mod->module_init_rx
60305 + + mod->init_size_rx);
60306 + flush_icache_range((unsigned long)mod->module_core_rx,
60307 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60308
60309 set_fs(old_fs);
60310
60311 - mod->args = args;
60312 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60313 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60314 mod->name);
60315 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60316 free_unload:
60317 module_unload_free(mod);
60318 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60319 + free_init_rx:
60320 percpu_modfree(mod->refptr);
60321 - free_init:
60322 #endif
60323 - module_free(mod, mod->module_init);
60324 - free_core:
60325 - module_free(mod, mod->module_core);
60326 + module_free_exec(mod, mod->module_init_rx);
60327 + free_core_rx:
60328 + module_free_exec(mod, mod->module_core_rx);
60329 + free_init_rw:
60330 + module_free(mod, mod->module_init_rw);
60331 + free_core_rw:
60332 + module_free(mod, mod->module_core_rw);
60333 /* mod will be freed with core. Don't access it beyond this line! */
60334 free_percpu:
60335 if (percpu)
60336 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60337 mod->symtab = mod->core_symtab;
60338 mod->strtab = mod->core_strtab;
60339 #endif
60340 - module_free(mod, mod->module_init);
60341 - mod->module_init = NULL;
60342 - mod->init_size = 0;
60343 - mod->init_text_size = 0;
60344 + module_free(mod, mod->module_init_rw);
60345 + module_free_exec(mod, mod->module_init_rx);
60346 + mod->module_init_rw = NULL;
60347 + mod->module_init_rx = NULL;
60348 + mod->init_size_rw = 0;
60349 + mod->init_size_rx = 0;
60350 mutex_unlock(&module_mutex);
60351
60352 return 0;
60353 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60354 unsigned long nextval;
60355
60356 /* At worse, next value is at end of module */
60357 - if (within_module_init(addr, mod))
60358 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60359 + if (within_module_init_rx(addr, mod))
60360 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60361 + else if (within_module_init_rw(addr, mod))
60362 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60363 + else if (within_module_core_rx(addr, mod))
60364 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60365 + else if (within_module_core_rw(addr, mod))
60366 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60367 else
60368 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60369 + return NULL;
60370
60371 /* Scan for closest preceeding symbol, and next symbol. (ELF
60372 starts real symbols at 1). */
60373 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60374 char buf[8];
60375
60376 seq_printf(m, "%s %u",
60377 - mod->name, mod->init_size + mod->core_size);
60378 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60379 print_unload_info(m, mod);
60380
60381 /* Informative for users. */
60382 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60383 mod->state == MODULE_STATE_COMING ? "Loading":
60384 "Live");
60385 /* Used by oprofile and other similar tools. */
60386 - seq_printf(m, " 0x%p", mod->module_core);
60387 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60388
60389 /* Taints info */
60390 if (mod->taints)
60391 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60392
60393 static int __init proc_modules_init(void)
60394 {
60395 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60396 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60397 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60398 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60399 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60400 +#else
60401 proc_create("modules", 0, NULL, &proc_modules_operations);
60402 +#endif
60403 +#else
60404 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60405 +#endif
60406 return 0;
60407 }
60408 module_init(proc_modules_init);
60409 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60410 {
60411 struct module *mod;
60412
60413 - if (addr < module_addr_min || addr > module_addr_max)
60414 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60415 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60416 return NULL;
60417
60418 list_for_each_entry_rcu(mod, &modules, list)
60419 - if (within_module_core(addr, mod)
60420 - || within_module_init(addr, mod))
60421 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60422 return mod;
60423 return NULL;
60424 }
60425 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60426 */
60427 struct module *__module_text_address(unsigned long addr)
60428 {
60429 - struct module *mod = __module_address(addr);
60430 + struct module *mod;
60431 +
60432 +#ifdef CONFIG_X86_32
60433 + addr = ktla_ktva(addr);
60434 +#endif
60435 +
60436 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60437 + return NULL;
60438 +
60439 + mod = __module_address(addr);
60440 +
60441 if (mod) {
60442 /* Make sure it's within the text section. */
60443 - if (!within(addr, mod->module_init, mod->init_text_size)
60444 - && !within(addr, mod->module_core, mod->core_text_size))
60445 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60446 mod = NULL;
60447 }
60448 return mod;
60449 diff -urNp linux-2.6.32.41/kernel/mutex.c linux-2.6.32.41/kernel/mutex.c
60450 --- linux-2.6.32.41/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60451 +++ linux-2.6.32.41/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60452 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60453 */
60454
60455 for (;;) {
60456 - struct thread_info *owner;
60457 + struct task_struct *owner;
60458
60459 /*
60460 * If we own the BKL, then don't spin. The owner of
60461 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60462 spin_lock_mutex(&lock->wait_lock, flags);
60463
60464 debug_mutex_lock_common(lock, &waiter);
60465 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60466 + debug_mutex_add_waiter(lock, &waiter, task);
60467
60468 /* add waiting tasks to the end of the waitqueue (FIFO): */
60469 list_add_tail(&waiter.list, &lock->wait_list);
60470 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60471 * TASK_UNINTERRUPTIBLE case.)
60472 */
60473 if (unlikely(signal_pending_state(state, task))) {
60474 - mutex_remove_waiter(lock, &waiter,
60475 - task_thread_info(task));
60476 + mutex_remove_waiter(lock, &waiter, task);
60477 mutex_release(&lock->dep_map, 1, ip);
60478 spin_unlock_mutex(&lock->wait_lock, flags);
60479
60480 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60481 done:
60482 lock_acquired(&lock->dep_map, ip);
60483 /* got the lock - rejoice! */
60484 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60485 + mutex_remove_waiter(lock, &waiter, task);
60486 mutex_set_owner(lock);
60487
60488 /* set it to 0 if there are no waiters left: */
60489 diff -urNp linux-2.6.32.41/kernel/mutex-debug.c linux-2.6.32.41/kernel/mutex-debug.c
60490 --- linux-2.6.32.41/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60491 +++ linux-2.6.32.41/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60492 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60493 }
60494
60495 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60496 - struct thread_info *ti)
60497 + struct task_struct *task)
60498 {
60499 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60500
60501 /* Mark the current thread as blocked on the lock: */
60502 - ti->task->blocked_on = waiter;
60503 + task->blocked_on = waiter;
60504 }
60505
60506 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60507 - struct thread_info *ti)
60508 + struct task_struct *task)
60509 {
60510 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60511 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60512 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60513 - ti->task->blocked_on = NULL;
60514 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60515 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60516 + task->blocked_on = NULL;
60517
60518 list_del_init(&waiter->list);
60519 waiter->task = NULL;
60520 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60521 return;
60522
60523 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60524 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60525 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60526 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60527 mutex_clear_owner(lock);
60528 }
60529 diff -urNp linux-2.6.32.41/kernel/mutex-debug.h linux-2.6.32.41/kernel/mutex-debug.h
60530 --- linux-2.6.32.41/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60531 +++ linux-2.6.32.41/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60532 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60533 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60534 extern void debug_mutex_add_waiter(struct mutex *lock,
60535 struct mutex_waiter *waiter,
60536 - struct thread_info *ti);
60537 + struct task_struct *task);
60538 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60539 - struct thread_info *ti);
60540 + struct task_struct *task);
60541 extern void debug_mutex_unlock(struct mutex *lock);
60542 extern void debug_mutex_init(struct mutex *lock, const char *name,
60543 struct lock_class_key *key);
60544
60545 static inline void mutex_set_owner(struct mutex *lock)
60546 {
60547 - lock->owner = current_thread_info();
60548 + lock->owner = current;
60549 }
60550
60551 static inline void mutex_clear_owner(struct mutex *lock)
60552 diff -urNp linux-2.6.32.41/kernel/mutex.h linux-2.6.32.41/kernel/mutex.h
60553 --- linux-2.6.32.41/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60554 +++ linux-2.6.32.41/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60555 @@ -19,7 +19,7 @@
60556 #ifdef CONFIG_SMP
60557 static inline void mutex_set_owner(struct mutex *lock)
60558 {
60559 - lock->owner = current_thread_info();
60560 + lock->owner = current;
60561 }
60562
60563 static inline void mutex_clear_owner(struct mutex *lock)
60564 diff -urNp linux-2.6.32.41/kernel/panic.c linux-2.6.32.41/kernel/panic.c
60565 --- linux-2.6.32.41/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60566 +++ linux-2.6.32.41/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60567 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60568 const char *board;
60569
60570 printk(KERN_WARNING "------------[ cut here ]------------\n");
60571 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60572 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60573 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60574 if (board)
60575 printk(KERN_WARNING "Hardware name: %s\n", board);
60576 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60577 */
60578 void __stack_chk_fail(void)
60579 {
60580 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60581 + dump_stack();
60582 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60583 __builtin_return_address(0));
60584 }
60585 EXPORT_SYMBOL(__stack_chk_fail);
60586 diff -urNp linux-2.6.32.41/kernel/params.c linux-2.6.32.41/kernel/params.c
60587 --- linux-2.6.32.41/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60588 +++ linux-2.6.32.41/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60589 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60590 return ret;
60591 }
60592
60593 -static struct sysfs_ops module_sysfs_ops = {
60594 +static const struct sysfs_ops module_sysfs_ops = {
60595 .show = module_attr_show,
60596 .store = module_attr_store,
60597 };
60598 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60599 return 0;
60600 }
60601
60602 -static struct kset_uevent_ops module_uevent_ops = {
60603 +static const struct kset_uevent_ops module_uevent_ops = {
60604 .filter = uevent_filter,
60605 };
60606
60607 diff -urNp linux-2.6.32.41/kernel/perf_event.c linux-2.6.32.41/kernel/perf_event.c
60608 --- linux-2.6.32.41/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60609 +++ linux-2.6.32.41/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60610 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60611 */
60612 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60613
60614 -static atomic64_t perf_event_id;
60615 +static atomic64_unchecked_t perf_event_id;
60616
60617 /*
60618 * Lock for (sysadmin-configurable) event reservations:
60619 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60620 * In order to keep per-task stats reliable we need to flip the event
60621 * values when we flip the contexts.
60622 */
60623 - value = atomic64_read(&next_event->count);
60624 - value = atomic64_xchg(&event->count, value);
60625 - atomic64_set(&next_event->count, value);
60626 + value = atomic64_read_unchecked(&next_event->count);
60627 + value = atomic64_xchg_unchecked(&event->count, value);
60628 + atomic64_set_unchecked(&next_event->count, value);
60629
60630 swap(event->total_time_enabled, next_event->total_time_enabled);
60631 swap(event->total_time_running, next_event->total_time_running);
60632 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60633 update_event_times(event);
60634 }
60635
60636 - return atomic64_read(&event->count);
60637 + return atomic64_read_unchecked(&event->count);
60638 }
60639
60640 /*
60641 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60642 values[n++] = 1 + leader->nr_siblings;
60643 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60644 values[n++] = leader->total_time_enabled +
60645 - atomic64_read(&leader->child_total_time_enabled);
60646 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60647 }
60648 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60649 values[n++] = leader->total_time_running +
60650 - atomic64_read(&leader->child_total_time_running);
60651 + atomic64_read_unchecked(&leader->child_total_time_running);
60652 }
60653
60654 size = n * sizeof(u64);
60655 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60656 values[n++] = perf_event_read_value(event);
60657 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60658 values[n++] = event->total_time_enabled +
60659 - atomic64_read(&event->child_total_time_enabled);
60660 + atomic64_read_unchecked(&event->child_total_time_enabled);
60661 }
60662 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60663 values[n++] = event->total_time_running +
60664 - atomic64_read(&event->child_total_time_running);
60665 + atomic64_read_unchecked(&event->child_total_time_running);
60666 }
60667 if (read_format & PERF_FORMAT_ID)
60668 values[n++] = primary_event_id(event);
60669 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60670 static void perf_event_reset(struct perf_event *event)
60671 {
60672 (void)perf_event_read(event);
60673 - atomic64_set(&event->count, 0);
60674 + atomic64_set_unchecked(&event->count, 0);
60675 perf_event_update_userpage(event);
60676 }
60677
60678 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60679 ++userpg->lock;
60680 barrier();
60681 userpg->index = perf_event_index(event);
60682 - userpg->offset = atomic64_read(&event->count);
60683 + userpg->offset = atomic64_read_unchecked(&event->count);
60684 if (event->state == PERF_EVENT_STATE_ACTIVE)
60685 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60686 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60687
60688 userpg->time_enabled = event->total_time_enabled +
60689 - atomic64_read(&event->child_total_time_enabled);
60690 + atomic64_read_unchecked(&event->child_total_time_enabled);
60691
60692 userpg->time_running = event->total_time_running +
60693 - atomic64_read(&event->child_total_time_running);
60694 + atomic64_read_unchecked(&event->child_total_time_running);
60695
60696 barrier();
60697 ++userpg->lock;
60698 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60699 u64 values[4];
60700 int n = 0;
60701
60702 - values[n++] = atomic64_read(&event->count);
60703 + values[n++] = atomic64_read_unchecked(&event->count);
60704 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60705 values[n++] = event->total_time_enabled +
60706 - atomic64_read(&event->child_total_time_enabled);
60707 + atomic64_read_unchecked(&event->child_total_time_enabled);
60708 }
60709 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60710 values[n++] = event->total_time_running +
60711 - atomic64_read(&event->child_total_time_running);
60712 + atomic64_read_unchecked(&event->child_total_time_running);
60713 }
60714 if (read_format & PERF_FORMAT_ID)
60715 values[n++] = primary_event_id(event);
60716 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60717 if (leader != event)
60718 leader->pmu->read(leader);
60719
60720 - values[n++] = atomic64_read(&leader->count);
60721 + values[n++] = atomic64_read_unchecked(&leader->count);
60722 if (read_format & PERF_FORMAT_ID)
60723 values[n++] = primary_event_id(leader);
60724
60725 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60726 if (sub != event)
60727 sub->pmu->read(sub);
60728
60729 - values[n++] = atomic64_read(&sub->count);
60730 + values[n++] = atomic64_read_unchecked(&sub->count);
60731 if (read_format & PERF_FORMAT_ID)
60732 values[n++] = primary_event_id(sub);
60733
60734 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60735 {
60736 struct hw_perf_event *hwc = &event->hw;
60737
60738 - atomic64_add(nr, &event->count);
60739 + atomic64_add_unchecked(nr, &event->count);
60740
60741 if (!hwc->sample_period)
60742 return;
60743 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60744 u64 now;
60745
60746 now = cpu_clock(cpu);
60747 - prev = atomic64_read(&event->hw.prev_count);
60748 - atomic64_set(&event->hw.prev_count, now);
60749 - atomic64_add(now - prev, &event->count);
60750 + prev = atomic64_read_unchecked(&event->hw.prev_count);
60751 + atomic64_set_unchecked(&event->hw.prev_count, now);
60752 + atomic64_add_unchecked(now - prev, &event->count);
60753 }
60754
60755 static int cpu_clock_perf_event_enable(struct perf_event *event)
60756 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60757 struct hw_perf_event *hwc = &event->hw;
60758 int cpu = raw_smp_processor_id();
60759
60760 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60761 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60762 perf_swevent_start_hrtimer(event);
60763
60764 return 0;
60765 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60766 u64 prev;
60767 s64 delta;
60768
60769 - prev = atomic64_xchg(&event->hw.prev_count, now);
60770 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60771 delta = now - prev;
60772 - atomic64_add(delta, &event->count);
60773 + atomic64_add_unchecked(delta, &event->count);
60774 }
60775
60776 static int task_clock_perf_event_enable(struct perf_event *event)
60777 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60778
60779 now = event->ctx->time;
60780
60781 - atomic64_set(&hwc->prev_count, now);
60782 + atomic64_set_unchecked(&hwc->prev_count, now);
60783
60784 perf_swevent_start_hrtimer(event);
60785
60786 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60787 event->parent = parent_event;
60788
60789 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60790 - event->id = atomic64_inc_return(&perf_event_id);
60791 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
60792
60793 event->state = PERF_EVENT_STATE_INACTIVE;
60794
60795 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60796 if (child_event->attr.inherit_stat)
60797 perf_event_read_event(child_event, child);
60798
60799 - child_val = atomic64_read(&child_event->count);
60800 + child_val = atomic64_read_unchecked(&child_event->count);
60801
60802 /*
60803 * Add back the child's count to the parent's count:
60804 */
60805 - atomic64_add(child_val, &parent_event->count);
60806 - atomic64_add(child_event->total_time_enabled,
60807 + atomic64_add_unchecked(child_val, &parent_event->count);
60808 + atomic64_add_unchecked(child_event->total_time_enabled,
60809 &parent_event->child_total_time_enabled);
60810 - atomic64_add(child_event->total_time_running,
60811 + atomic64_add_unchecked(child_event->total_time_running,
60812 &parent_event->child_total_time_running);
60813
60814 /*
60815 diff -urNp linux-2.6.32.41/kernel/pid.c linux-2.6.32.41/kernel/pid.c
60816 --- linux-2.6.32.41/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60817 +++ linux-2.6.32.41/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60818 @@ -33,6 +33,7 @@
60819 #include <linux/rculist.h>
60820 #include <linux/bootmem.h>
60821 #include <linux/hash.h>
60822 +#include <linux/security.h>
60823 #include <linux/pid_namespace.h>
60824 #include <linux/init_task.h>
60825 #include <linux/syscalls.h>
60826 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60827
60828 int pid_max = PID_MAX_DEFAULT;
60829
60830 -#define RESERVED_PIDS 300
60831 +#define RESERVED_PIDS 500
60832
60833 int pid_max_min = RESERVED_PIDS + 1;
60834 int pid_max_max = PID_MAX_LIMIT;
60835 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60836 */
60837 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60838 {
60839 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60840 + struct task_struct *task;
60841 +
60842 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60843 +
60844 + if (gr_pid_is_chrooted(task))
60845 + return NULL;
60846 +
60847 + return task;
60848 }
60849
60850 struct task_struct *find_task_by_vpid(pid_t vnr)
60851 diff -urNp linux-2.6.32.41/kernel/posix-cpu-timers.c linux-2.6.32.41/kernel/posix-cpu-timers.c
60852 --- linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60853 +++ linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60854 @@ -6,6 +6,7 @@
60855 #include <linux/posix-timers.h>
60856 #include <linux/errno.h>
60857 #include <linux/math64.h>
60858 +#include <linux/security.h>
60859 #include <asm/uaccess.h>
60860 #include <linux/kernel_stat.h>
60861 #include <trace/events/timer.h>
60862 diff -urNp linux-2.6.32.41/kernel/posix-timers.c linux-2.6.32.41/kernel/posix-timers.c
60863 --- linux-2.6.32.41/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60864 +++ linux-2.6.32.41/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60865 @@ -42,6 +42,7 @@
60866 #include <linux/compiler.h>
60867 #include <linux/idr.h>
60868 #include <linux/posix-timers.h>
60869 +#include <linux/grsecurity.h>
60870 #include <linux/syscalls.h>
60871 #include <linux/wait.h>
60872 #include <linux/workqueue.h>
60873 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60874 .nsleep = no_nsleep,
60875 };
60876
60877 + pax_track_stack();
60878 +
60879 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60880 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60881 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60882 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60883 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60884 return -EFAULT;
60885
60886 + /* only the CLOCK_REALTIME clock can be set, all other clocks
60887 + have their clock_set fptr set to a nosettime dummy function
60888 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
60889 + call common_clock_set, which calls do_sys_settimeofday, which
60890 + we hook
60891 + */
60892 +
60893 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60894 }
60895
60896 diff -urNp linux-2.6.32.41/kernel/power/hibernate.c linux-2.6.32.41/kernel/power/hibernate.c
60897 --- linux-2.6.32.41/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
60898 +++ linux-2.6.32.41/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
60899 @@ -48,14 +48,14 @@ enum {
60900
60901 static int hibernation_mode = HIBERNATION_SHUTDOWN;
60902
60903 -static struct platform_hibernation_ops *hibernation_ops;
60904 +static const struct platform_hibernation_ops *hibernation_ops;
60905
60906 /**
60907 * hibernation_set_ops - set the global hibernate operations
60908 * @ops: the hibernation operations to use in subsequent hibernation transitions
60909 */
60910
60911 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
60912 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
60913 {
60914 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
60915 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
60916 diff -urNp linux-2.6.32.41/kernel/power/poweroff.c linux-2.6.32.41/kernel/power/poweroff.c
60917 --- linux-2.6.32.41/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
60918 +++ linux-2.6.32.41/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
60919 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
60920 .enable_mask = SYSRQ_ENABLE_BOOT,
60921 };
60922
60923 -static int pm_sysrq_init(void)
60924 +static int __init pm_sysrq_init(void)
60925 {
60926 register_sysrq_key('o', &sysrq_poweroff_op);
60927 return 0;
60928 diff -urNp linux-2.6.32.41/kernel/power/process.c linux-2.6.32.41/kernel/power/process.c
60929 --- linux-2.6.32.41/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
60930 +++ linux-2.6.32.41/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
60931 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
60932 struct timeval start, end;
60933 u64 elapsed_csecs64;
60934 unsigned int elapsed_csecs;
60935 + bool timedout = false;
60936
60937 do_gettimeofday(&start);
60938
60939 end_time = jiffies + TIMEOUT;
60940 do {
60941 todo = 0;
60942 + if (time_after(jiffies, end_time))
60943 + timedout = true;
60944 read_lock(&tasklist_lock);
60945 do_each_thread(g, p) {
60946 if (frozen(p) || !freezeable(p))
60947 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
60948 * It is "frozen enough". If the task does wake
60949 * up, it will immediately call try_to_freeze.
60950 */
60951 - if (!task_is_stopped_or_traced(p) &&
60952 - !freezer_should_skip(p))
60953 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
60954 todo++;
60955 + if (timedout) {
60956 + printk(KERN_ERR "Task refusing to freeze:\n");
60957 + sched_show_task(p);
60958 + }
60959 + }
60960 } while_each_thread(g, p);
60961 read_unlock(&tasklist_lock);
60962 yield(); /* Yield is okay here */
60963 - if (time_after(jiffies, end_time))
60964 - break;
60965 - } while (todo);
60966 + } while (todo && !timedout);
60967
60968 do_gettimeofday(&end);
60969 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
60970 diff -urNp linux-2.6.32.41/kernel/power/suspend.c linux-2.6.32.41/kernel/power/suspend.c
60971 --- linux-2.6.32.41/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
60972 +++ linux-2.6.32.41/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
60973 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
60974 [PM_SUSPEND_MEM] = "mem",
60975 };
60976
60977 -static struct platform_suspend_ops *suspend_ops;
60978 +static const struct platform_suspend_ops *suspend_ops;
60979
60980 /**
60981 * suspend_set_ops - Set the global suspend method table.
60982 * @ops: Pointer to ops structure.
60983 */
60984 -void suspend_set_ops(struct platform_suspend_ops *ops)
60985 +void suspend_set_ops(const struct platform_suspend_ops *ops)
60986 {
60987 mutex_lock(&pm_mutex);
60988 suspend_ops = ops;
60989 diff -urNp linux-2.6.32.41/kernel/printk.c linux-2.6.32.41/kernel/printk.c
60990 --- linux-2.6.32.41/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
60991 +++ linux-2.6.32.41/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
60992 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
60993 char c;
60994 int error = 0;
60995
60996 +#ifdef CONFIG_GRKERNSEC_DMESG
60997 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
60998 + return -EPERM;
60999 +#endif
61000 +
61001 error = security_syslog(type);
61002 if (error)
61003 return error;
61004 diff -urNp linux-2.6.32.41/kernel/profile.c linux-2.6.32.41/kernel/profile.c
61005 --- linux-2.6.32.41/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61006 +++ linux-2.6.32.41/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61007 @@ -39,7 +39,7 @@ struct profile_hit {
61008 /* Oprofile timer tick hook */
61009 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61010
61011 -static atomic_t *prof_buffer;
61012 +static atomic_unchecked_t *prof_buffer;
61013 static unsigned long prof_len, prof_shift;
61014
61015 int prof_on __read_mostly;
61016 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61017 hits[i].pc = 0;
61018 continue;
61019 }
61020 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61021 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61022 hits[i].hits = hits[i].pc = 0;
61023 }
61024 }
61025 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61026 * Add the current hit(s) and flush the write-queue out
61027 * to the global buffer:
61028 */
61029 - atomic_add(nr_hits, &prof_buffer[pc]);
61030 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61031 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61032 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61033 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61034 hits[i].pc = hits[i].hits = 0;
61035 }
61036 out:
61037 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61038 if (prof_on != type || !prof_buffer)
61039 return;
61040 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61041 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61042 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61043 }
61044 #endif /* !CONFIG_SMP */
61045 EXPORT_SYMBOL_GPL(profile_hits);
61046 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61047 return -EFAULT;
61048 buf++; p++; count--; read++;
61049 }
61050 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61051 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61052 if (copy_to_user(buf, (void *)pnt, count))
61053 return -EFAULT;
61054 read += count;
61055 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61056 }
61057 #endif
61058 profile_discard_flip_buffers();
61059 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61060 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61061 return count;
61062 }
61063
61064 diff -urNp linux-2.6.32.41/kernel/ptrace.c linux-2.6.32.41/kernel/ptrace.c
61065 --- linux-2.6.32.41/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61066 +++ linux-2.6.32.41/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61067 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61068 return ret;
61069 }
61070
61071 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61072 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61073 + unsigned int log)
61074 {
61075 const struct cred *cred = current_cred(), *tcred;
61076
61077 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61078 cred->gid != tcred->egid ||
61079 cred->gid != tcred->sgid ||
61080 cred->gid != tcred->gid) &&
61081 - !capable(CAP_SYS_PTRACE)) {
61082 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61083 + (log && !capable(CAP_SYS_PTRACE)))
61084 + ) {
61085 rcu_read_unlock();
61086 return -EPERM;
61087 }
61088 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61089 smp_rmb();
61090 if (task->mm)
61091 dumpable = get_dumpable(task->mm);
61092 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61093 + if (!dumpable &&
61094 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61095 + (log && !capable(CAP_SYS_PTRACE))))
61096 return -EPERM;
61097
61098 return security_ptrace_access_check(task, mode);
61099 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61100 {
61101 int err;
61102 task_lock(task);
61103 - err = __ptrace_may_access(task, mode);
61104 + err = __ptrace_may_access(task, mode, 0);
61105 + task_unlock(task);
61106 + return !err;
61107 +}
61108 +
61109 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61110 +{
61111 + int err;
61112 + task_lock(task);
61113 + err = __ptrace_may_access(task, mode, 1);
61114 task_unlock(task);
61115 return !err;
61116 }
61117 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61118 goto out;
61119
61120 task_lock(task);
61121 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61122 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61123 task_unlock(task);
61124 if (retval)
61125 goto unlock_creds;
61126 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61127 goto unlock_tasklist;
61128
61129 task->ptrace = PT_PTRACED;
61130 - if (capable(CAP_SYS_PTRACE))
61131 + if (capable_nolog(CAP_SYS_PTRACE))
61132 task->ptrace |= PT_PTRACE_CAP;
61133
61134 __ptrace_link(task, current);
61135 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61136 {
61137 int copied = 0;
61138
61139 + pax_track_stack();
61140 +
61141 while (len > 0) {
61142 char buf[128];
61143 int this_len, retval;
61144 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61145 {
61146 int copied = 0;
61147
61148 + pax_track_stack();
61149 +
61150 while (len > 0) {
61151 char buf[128];
61152 int this_len, retval;
61153 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61154 int ret = -EIO;
61155 siginfo_t siginfo;
61156
61157 + pax_track_stack();
61158 +
61159 switch (request) {
61160 case PTRACE_PEEKTEXT:
61161 case PTRACE_PEEKDATA:
61162 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61163 ret = ptrace_setoptions(child, data);
61164 break;
61165 case PTRACE_GETEVENTMSG:
61166 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61167 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61168 break;
61169
61170 case PTRACE_GETSIGINFO:
61171 ret = ptrace_getsiginfo(child, &siginfo);
61172 if (!ret)
61173 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61174 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61175 &siginfo);
61176 break;
61177
61178 case PTRACE_SETSIGINFO:
61179 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61180 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61181 sizeof siginfo))
61182 ret = -EFAULT;
61183 else
61184 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61185 goto out;
61186 }
61187
61188 + if (gr_handle_ptrace(child, request)) {
61189 + ret = -EPERM;
61190 + goto out_put_task_struct;
61191 + }
61192 +
61193 if (request == PTRACE_ATTACH) {
61194 ret = ptrace_attach(child);
61195 /*
61196 * Some architectures need to do book-keeping after
61197 * a ptrace attach.
61198 */
61199 - if (!ret)
61200 + if (!ret) {
61201 arch_ptrace_attach(child);
61202 + gr_audit_ptrace(child);
61203 + }
61204 goto out_put_task_struct;
61205 }
61206
61207 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61208 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61209 if (copied != sizeof(tmp))
61210 return -EIO;
61211 - return put_user(tmp, (unsigned long __user *)data);
61212 + return put_user(tmp, (__force unsigned long __user *)data);
61213 }
61214
61215 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61216 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61217 siginfo_t siginfo;
61218 int ret;
61219
61220 + pax_track_stack();
61221 +
61222 switch (request) {
61223 case PTRACE_PEEKTEXT:
61224 case PTRACE_PEEKDATA:
61225 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61226 goto out;
61227 }
61228
61229 + if (gr_handle_ptrace(child, request)) {
61230 + ret = -EPERM;
61231 + goto out_put_task_struct;
61232 + }
61233 +
61234 if (request == PTRACE_ATTACH) {
61235 ret = ptrace_attach(child);
61236 /*
61237 * Some architectures need to do book-keeping after
61238 * a ptrace attach.
61239 */
61240 - if (!ret)
61241 + if (!ret) {
61242 arch_ptrace_attach(child);
61243 + gr_audit_ptrace(child);
61244 + }
61245 goto out_put_task_struct;
61246 }
61247
61248 diff -urNp linux-2.6.32.41/kernel/rcutorture.c linux-2.6.32.41/kernel/rcutorture.c
61249 --- linux-2.6.32.41/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61250 +++ linux-2.6.32.41/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61251 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61252 { 0 };
61253 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61254 { 0 };
61255 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61256 -static atomic_t n_rcu_torture_alloc;
61257 -static atomic_t n_rcu_torture_alloc_fail;
61258 -static atomic_t n_rcu_torture_free;
61259 -static atomic_t n_rcu_torture_mberror;
61260 -static atomic_t n_rcu_torture_error;
61261 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61262 +static atomic_unchecked_t n_rcu_torture_alloc;
61263 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61264 +static atomic_unchecked_t n_rcu_torture_free;
61265 +static atomic_unchecked_t n_rcu_torture_mberror;
61266 +static atomic_unchecked_t n_rcu_torture_error;
61267 static long n_rcu_torture_timers;
61268 static struct list_head rcu_torture_removed;
61269 static cpumask_var_t shuffle_tmp_mask;
61270 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61271
61272 spin_lock_bh(&rcu_torture_lock);
61273 if (list_empty(&rcu_torture_freelist)) {
61274 - atomic_inc(&n_rcu_torture_alloc_fail);
61275 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61276 spin_unlock_bh(&rcu_torture_lock);
61277 return NULL;
61278 }
61279 - atomic_inc(&n_rcu_torture_alloc);
61280 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61281 p = rcu_torture_freelist.next;
61282 list_del_init(p);
61283 spin_unlock_bh(&rcu_torture_lock);
61284 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61285 static void
61286 rcu_torture_free(struct rcu_torture *p)
61287 {
61288 - atomic_inc(&n_rcu_torture_free);
61289 + atomic_inc_unchecked(&n_rcu_torture_free);
61290 spin_lock_bh(&rcu_torture_lock);
61291 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61292 spin_unlock_bh(&rcu_torture_lock);
61293 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61294 i = rp->rtort_pipe_count;
61295 if (i > RCU_TORTURE_PIPE_LEN)
61296 i = RCU_TORTURE_PIPE_LEN;
61297 - atomic_inc(&rcu_torture_wcount[i]);
61298 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61299 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61300 rp->rtort_mbtest = 0;
61301 rcu_torture_free(rp);
61302 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61303 i = rp->rtort_pipe_count;
61304 if (i > RCU_TORTURE_PIPE_LEN)
61305 i = RCU_TORTURE_PIPE_LEN;
61306 - atomic_inc(&rcu_torture_wcount[i]);
61307 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61308 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61309 rp->rtort_mbtest = 0;
61310 list_del(&rp->rtort_free);
61311 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61312 i = old_rp->rtort_pipe_count;
61313 if (i > RCU_TORTURE_PIPE_LEN)
61314 i = RCU_TORTURE_PIPE_LEN;
61315 - atomic_inc(&rcu_torture_wcount[i]);
61316 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61317 old_rp->rtort_pipe_count++;
61318 cur_ops->deferred_free(old_rp);
61319 }
61320 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61321 return;
61322 }
61323 if (p->rtort_mbtest == 0)
61324 - atomic_inc(&n_rcu_torture_mberror);
61325 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61326 spin_lock(&rand_lock);
61327 cur_ops->read_delay(&rand);
61328 n_rcu_torture_timers++;
61329 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61330 continue;
61331 }
61332 if (p->rtort_mbtest == 0)
61333 - atomic_inc(&n_rcu_torture_mberror);
61334 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61335 cur_ops->read_delay(&rand);
61336 preempt_disable();
61337 pipe_count = p->rtort_pipe_count;
61338 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61339 rcu_torture_current,
61340 rcu_torture_current_version,
61341 list_empty(&rcu_torture_freelist),
61342 - atomic_read(&n_rcu_torture_alloc),
61343 - atomic_read(&n_rcu_torture_alloc_fail),
61344 - atomic_read(&n_rcu_torture_free),
61345 - atomic_read(&n_rcu_torture_mberror),
61346 + atomic_read_unchecked(&n_rcu_torture_alloc),
61347 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61348 + atomic_read_unchecked(&n_rcu_torture_free),
61349 + atomic_read_unchecked(&n_rcu_torture_mberror),
61350 n_rcu_torture_timers);
61351 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61352 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61353 cnt += sprintf(&page[cnt], " !!!");
61354 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61355 if (i > 1) {
61356 cnt += sprintf(&page[cnt], "!!! ");
61357 - atomic_inc(&n_rcu_torture_error);
61358 + atomic_inc_unchecked(&n_rcu_torture_error);
61359 WARN_ON_ONCE(1);
61360 }
61361 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61362 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61363 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61364 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61365 cnt += sprintf(&page[cnt], " %d",
61366 - atomic_read(&rcu_torture_wcount[i]));
61367 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61368 }
61369 cnt += sprintf(&page[cnt], "\n");
61370 if (cur_ops->stats)
61371 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61372
61373 if (cur_ops->cleanup)
61374 cur_ops->cleanup();
61375 - if (atomic_read(&n_rcu_torture_error))
61376 + if (atomic_read_unchecked(&n_rcu_torture_error))
61377 rcu_torture_print_module_parms("End of test: FAILURE");
61378 else
61379 rcu_torture_print_module_parms("End of test: SUCCESS");
61380 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61381
61382 rcu_torture_current = NULL;
61383 rcu_torture_current_version = 0;
61384 - atomic_set(&n_rcu_torture_alloc, 0);
61385 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61386 - atomic_set(&n_rcu_torture_free, 0);
61387 - atomic_set(&n_rcu_torture_mberror, 0);
61388 - atomic_set(&n_rcu_torture_error, 0);
61389 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61390 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61391 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61392 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61393 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61394 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61395 - atomic_set(&rcu_torture_wcount[i], 0);
61396 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61397 for_each_possible_cpu(cpu) {
61398 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61399 per_cpu(rcu_torture_count, cpu)[i] = 0;
61400 diff -urNp linux-2.6.32.41/kernel/rcutree.c linux-2.6.32.41/kernel/rcutree.c
61401 --- linux-2.6.32.41/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61402 +++ linux-2.6.32.41/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61403 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61404 /*
61405 * Do softirq processing for the current CPU.
61406 */
61407 -static void rcu_process_callbacks(struct softirq_action *unused)
61408 +static void rcu_process_callbacks(void)
61409 {
61410 /*
61411 * Memory references from any prior RCU read-side critical sections
61412 diff -urNp linux-2.6.32.41/kernel/rcutree_plugin.h linux-2.6.32.41/kernel/rcutree_plugin.h
61413 --- linux-2.6.32.41/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61414 +++ linux-2.6.32.41/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61415 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61416 */
61417 void __rcu_read_lock(void)
61418 {
61419 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61420 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61421 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61422 }
61423 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61424 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61425 struct task_struct *t = current;
61426
61427 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61428 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61429 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61430 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61431 rcu_read_unlock_special(t);
61432 }
61433 diff -urNp linux-2.6.32.41/kernel/relay.c linux-2.6.32.41/kernel/relay.c
61434 --- linux-2.6.32.41/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61435 +++ linux-2.6.32.41/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61436 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61437 unsigned int flags,
61438 int *nonpad_ret)
61439 {
61440 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61441 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61442 struct rchan_buf *rbuf = in->private_data;
61443 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61444 uint64_t pos = (uint64_t) *ppos;
61445 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61446 .ops = &relay_pipe_buf_ops,
61447 .spd_release = relay_page_release,
61448 };
61449 + ssize_t ret;
61450 +
61451 + pax_track_stack();
61452
61453 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61454 return 0;
61455 diff -urNp linux-2.6.32.41/kernel/resource.c linux-2.6.32.41/kernel/resource.c
61456 --- linux-2.6.32.41/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61457 +++ linux-2.6.32.41/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61458 @@ -132,8 +132,18 @@ static const struct file_operations proc
61459
61460 static int __init ioresources_init(void)
61461 {
61462 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61463 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61464 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61465 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61466 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61467 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61468 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61469 +#endif
61470 +#else
61471 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61472 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61473 +#endif
61474 return 0;
61475 }
61476 __initcall(ioresources_init);
61477 diff -urNp linux-2.6.32.41/kernel/rtmutex.c linux-2.6.32.41/kernel/rtmutex.c
61478 --- linux-2.6.32.41/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61479 +++ linux-2.6.32.41/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61480 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61481 */
61482 spin_lock_irqsave(&pendowner->pi_lock, flags);
61483
61484 - WARN_ON(!pendowner->pi_blocked_on);
61485 + BUG_ON(!pendowner->pi_blocked_on);
61486 WARN_ON(pendowner->pi_blocked_on != waiter);
61487 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61488
61489 diff -urNp linux-2.6.32.41/kernel/rtmutex-tester.c linux-2.6.32.41/kernel/rtmutex-tester.c
61490 --- linux-2.6.32.41/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61491 +++ linux-2.6.32.41/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61492 @@ -21,7 +21,7 @@
61493 #define MAX_RT_TEST_MUTEXES 8
61494
61495 static spinlock_t rttest_lock;
61496 -static atomic_t rttest_event;
61497 +static atomic_unchecked_t rttest_event;
61498
61499 struct test_thread_data {
61500 int opcode;
61501 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61502
61503 case RTTEST_LOCKCONT:
61504 td->mutexes[td->opdata] = 1;
61505 - td->event = atomic_add_return(1, &rttest_event);
61506 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61507 return 0;
61508
61509 case RTTEST_RESET:
61510 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61511 return 0;
61512
61513 case RTTEST_RESETEVENT:
61514 - atomic_set(&rttest_event, 0);
61515 + atomic_set_unchecked(&rttest_event, 0);
61516 return 0;
61517
61518 default:
61519 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61520 return ret;
61521
61522 td->mutexes[id] = 1;
61523 - td->event = atomic_add_return(1, &rttest_event);
61524 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61525 rt_mutex_lock(&mutexes[id]);
61526 - td->event = atomic_add_return(1, &rttest_event);
61527 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61528 td->mutexes[id] = 4;
61529 return 0;
61530
61531 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61532 return ret;
61533
61534 td->mutexes[id] = 1;
61535 - td->event = atomic_add_return(1, &rttest_event);
61536 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61537 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61538 - td->event = atomic_add_return(1, &rttest_event);
61539 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61540 td->mutexes[id] = ret ? 0 : 4;
61541 return ret ? -EINTR : 0;
61542
61543 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61544 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61545 return ret;
61546
61547 - td->event = atomic_add_return(1, &rttest_event);
61548 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61549 rt_mutex_unlock(&mutexes[id]);
61550 - td->event = atomic_add_return(1, &rttest_event);
61551 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61552 td->mutexes[id] = 0;
61553 return 0;
61554
61555 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61556 break;
61557
61558 td->mutexes[dat] = 2;
61559 - td->event = atomic_add_return(1, &rttest_event);
61560 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61561 break;
61562
61563 case RTTEST_LOCKBKL:
61564 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61565 return;
61566
61567 td->mutexes[dat] = 3;
61568 - td->event = atomic_add_return(1, &rttest_event);
61569 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61570 break;
61571
61572 case RTTEST_LOCKNOWAIT:
61573 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61574 return;
61575
61576 td->mutexes[dat] = 1;
61577 - td->event = atomic_add_return(1, &rttest_event);
61578 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61579 return;
61580
61581 case RTTEST_LOCKBKL:
61582 diff -urNp linux-2.6.32.41/kernel/sched.c linux-2.6.32.41/kernel/sched.c
61583 --- linux-2.6.32.41/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61584 +++ linux-2.6.32.41/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61585 @@ -5043,7 +5043,7 @@ out:
61586 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61587 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61588 */
61589 -static void run_rebalance_domains(struct softirq_action *h)
61590 +static void run_rebalance_domains(void)
61591 {
61592 int this_cpu = smp_processor_id();
61593 struct rq *this_rq = cpu_rq(this_cpu);
61594 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61595 struct rq *rq;
61596 int cpu;
61597
61598 + pax_track_stack();
61599 +
61600 need_resched:
61601 preempt_disable();
61602 cpu = smp_processor_id();
61603 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61604 * Look out! "owner" is an entirely speculative pointer
61605 * access and not reliable.
61606 */
61607 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61608 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61609 {
61610 unsigned int cpu;
61611 struct rq *rq;
61612 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61613 * DEBUG_PAGEALLOC could have unmapped it if
61614 * the mutex owner just released it and exited.
61615 */
61616 - if (probe_kernel_address(&owner->cpu, cpu))
61617 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61618 return 0;
61619 #else
61620 - cpu = owner->cpu;
61621 + cpu = task_thread_info(owner)->cpu;
61622 #endif
61623
61624 /*
61625 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61626 /*
61627 * Is that owner really running on that cpu?
61628 */
61629 - if (task_thread_info(rq->curr) != owner || need_resched())
61630 + if (rq->curr != owner || need_resched())
61631 return 0;
61632
61633 cpu_relax();
61634 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61635 /* convert nice value [19,-20] to rlimit style value [1,40] */
61636 int nice_rlim = 20 - nice;
61637
61638 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61639 +
61640 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61641 capable(CAP_SYS_NICE));
61642 }
61643 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61644 if (nice > 19)
61645 nice = 19;
61646
61647 - if (increment < 0 && !can_nice(current, nice))
61648 + if (increment < 0 && (!can_nice(current, nice) ||
61649 + gr_handle_chroot_nice()))
61650 return -EPERM;
61651
61652 retval = security_task_setnice(current, nice);
61653 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61654 long power;
61655 int weight;
61656
61657 - WARN_ON(!sd || !sd->groups);
61658 + BUG_ON(!sd || !sd->groups);
61659
61660 if (cpu != group_first_cpu(sd->groups))
61661 return;
61662 diff -urNp linux-2.6.32.41/kernel/signal.c linux-2.6.32.41/kernel/signal.c
61663 --- linux-2.6.32.41/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61664 +++ linux-2.6.32.41/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61665 @@ -41,12 +41,12 @@
61666
61667 static struct kmem_cache *sigqueue_cachep;
61668
61669 -static void __user *sig_handler(struct task_struct *t, int sig)
61670 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61671 {
61672 return t->sighand->action[sig - 1].sa.sa_handler;
61673 }
61674
61675 -static int sig_handler_ignored(void __user *handler, int sig)
61676 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61677 {
61678 /* Is it explicitly or implicitly ignored? */
61679 return handler == SIG_IGN ||
61680 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61681 static int sig_task_ignored(struct task_struct *t, int sig,
61682 int from_ancestor_ns)
61683 {
61684 - void __user *handler;
61685 + __sighandler_t handler;
61686
61687 handler = sig_handler(t, sig);
61688
61689 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61690 */
61691 user = get_uid(__task_cred(t)->user);
61692 atomic_inc(&user->sigpending);
61693 +
61694 + if (!override_rlimit)
61695 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61696 if (override_rlimit ||
61697 atomic_read(&user->sigpending) <=
61698 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61699 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61700
61701 int unhandled_signal(struct task_struct *tsk, int sig)
61702 {
61703 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61704 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61705 if (is_global_init(tsk))
61706 return 1;
61707 if (handler != SIG_IGN && handler != SIG_DFL)
61708 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61709 }
61710 }
61711
61712 + if (gr_handle_signal(t, sig))
61713 + return -EPERM;
61714 +
61715 return security_task_kill(t, info, sig, 0);
61716 }
61717
61718 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61719 return send_signal(sig, info, p, 1);
61720 }
61721
61722 -static int
61723 +int
61724 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61725 {
61726 return send_signal(sig, info, t, 0);
61727 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61728 unsigned long int flags;
61729 int ret, blocked, ignored;
61730 struct k_sigaction *action;
61731 + int is_unhandled = 0;
61732
61733 spin_lock_irqsave(&t->sighand->siglock, flags);
61734 action = &t->sighand->action[sig-1];
61735 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61736 }
61737 if (action->sa.sa_handler == SIG_DFL)
61738 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61739 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61740 + is_unhandled = 1;
61741 ret = specific_send_sig_info(sig, info, t);
61742 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61743
61744 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
61745 + normal operation */
61746 + if (is_unhandled) {
61747 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61748 + gr_handle_crash(t, sig);
61749 + }
61750 +
61751 return ret;
61752 }
61753
61754 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61755 {
61756 int ret = check_kill_permission(sig, info, p);
61757
61758 - if (!ret && sig)
61759 + if (!ret && sig) {
61760 ret = do_send_sig_info(sig, info, p, true);
61761 + if (!ret)
61762 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61763 + }
61764
61765 return ret;
61766 }
61767 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61768 {
61769 siginfo_t info;
61770
61771 + pax_track_stack();
61772 +
61773 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61774
61775 memset(&info, 0, sizeof info);
61776 diff -urNp linux-2.6.32.41/kernel/smp.c linux-2.6.32.41/kernel/smp.c
61777 --- linux-2.6.32.41/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61778 +++ linux-2.6.32.41/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61779 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61780 }
61781 EXPORT_SYMBOL(smp_call_function);
61782
61783 -void ipi_call_lock(void)
61784 +void ipi_call_lock(void) __acquires(call_function.lock)
61785 {
61786 spin_lock(&call_function.lock);
61787 }
61788
61789 -void ipi_call_unlock(void)
61790 +void ipi_call_unlock(void) __releases(call_function.lock)
61791 {
61792 spin_unlock(&call_function.lock);
61793 }
61794
61795 -void ipi_call_lock_irq(void)
61796 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
61797 {
61798 spin_lock_irq(&call_function.lock);
61799 }
61800
61801 -void ipi_call_unlock_irq(void)
61802 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
61803 {
61804 spin_unlock_irq(&call_function.lock);
61805 }
61806 diff -urNp linux-2.6.32.41/kernel/softirq.c linux-2.6.32.41/kernel/softirq.c
61807 --- linux-2.6.32.41/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61808 +++ linux-2.6.32.41/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61809 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61810
61811 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61812
61813 -char *softirq_to_name[NR_SOFTIRQS] = {
61814 +const char * const softirq_to_name[NR_SOFTIRQS] = {
61815 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61816 "TASKLET", "SCHED", "HRTIMER", "RCU"
61817 };
61818 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61819
61820 asmlinkage void __do_softirq(void)
61821 {
61822 - struct softirq_action *h;
61823 + const struct softirq_action *h;
61824 __u32 pending;
61825 int max_restart = MAX_SOFTIRQ_RESTART;
61826 int cpu;
61827 @@ -233,7 +233,7 @@ restart:
61828 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61829
61830 trace_softirq_entry(h, softirq_vec);
61831 - h->action(h);
61832 + h->action();
61833 trace_softirq_exit(h, softirq_vec);
61834 if (unlikely(prev_count != preempt_count())) {
61835 printk(KERN_ERR "huh, entered softirq %td %s %p"
61836 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61837 local_irq_restore(flags);
61838 }
61839
61840 -void open_softirq(int nr, void (*action)(struct softirq_action *))
61841 +void open_softirq(int nr, void (*action)(void))
61842 {
61843 softirq_vec[nr].action = action;
61844 }
61845 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61846
61847 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61848
61849 -static void tasklet_action(struct softirq_action *a)
61850 +static void tasklet_action(void)
61851 {
61852 struct tasklet_struct *list;
61853
61854 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61855 }
61856 }
61857
61858 -static void tasklet_hi_action(struct softirq_action *a)
61859 +static void tasklet_hi_action(void)
61860 {
61861 struct tasklet_struct *list;
61862
61863 diff -urNp linux-2.6.32.41/kernel/sys.c linux-2.6.32.41/kernel/sys.c
61864 --- linux-2.6.32.41/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61865 +++ linux-2.6.32.41/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61866 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61867 error = -EACCES;
61868 goto out;
61869 }
61870 +
61871 + if (gr_handle_chroot_setpriority(p, niceval)) {
61872 + error = -EACCES;
61873 + goto out;
61874 + }
61875 +
61876 no_nice = security_task_setnice(p, niceval);
61877 if (no_nice) {
61878 error = no_nice;
61879 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61880 !(user = find_user(who)))
61881 goto out_unlock; /* No processes for this user */
61882
61883 - do_each_thread(g, p)
61884 + do_each_thread(g, p) {
61885 if (__task_cred(p)->uid == who)
61886 error = set_one_prio(p, niceval, error);
61887 - while_each_thread(g, p);
61888 + } while_each_thread(g, p);
61889 if (who != cred->uid)
61890 free_uid(user); /* For find_user() */
61891 break;
61892 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61893 !(user = find_user(who)))
61894 goto out_unlock; /* No processes for this user */
61895
61896 - do_each_thread(g, p)
61897 + do_each_thread(g, p) {
61898 if (__task_cred(p)->uid == who) {
61899 niceval = 20 - task_nice(p);
61900 if (niceval > retval)
61901 retval = niceval;
61902 }
61903 - while_each_thread(g, p);
61904 + } while_each_thread(g, p);
61905 if (who != cred->uid)
61906 free_uid(user); /* for find_user() */
61907 break;
61908 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
61909 goto error;
61910 }
61911
61912 + if (gr_check_group_change(new->gid, new->egid, -1))
61913 + goto error;
61914 +
61915 if (rgid != (gid_t) -1 ||
61916 (egid != (gid_t) -1 && egid != old->gid))
61917 new->sgid = new->egid;
61918 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
61919 goto error;
61920
61921 retval = -EPERM;
61922 +
61923 + if (gr_check_group_change(gid, gid, gid))
61924 + goto error;
61925 +
61926 if (capable(CAP_SETGID))
61927 new->gid = new->egid = new->sgid = new->fsgid = gid;
61928 else if (gid == old->gid || gid == old->sgid)
61929 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
61930 goto error;
61931 }
61932
61933 + if (gr_check_user_change(new->uid, new->euid, -1))
61934 + goto error;
61935 +
61936 if (new->uid != old->uid) {
61937 retval = set_user(new);
61938 if (retval < 0)
61939 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
61940 goto error;
61941
61942 retval = -EPERM;
61943 +
61944 + if (gr_check_crash_uid(uid))
61945 + goto error;
61946 + if (gr_check_user_change(uid, uid, uid))
61947 + goto error;
61948 +
61949 if (capable(CAP_SETUID)) {
61950 new->suid = new->uid = uid;
61951 if (uid != old->uid) {
61952 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
61953 goto error;
61954 }
61955
61956 + if (gr_check_user_change(ruid, euid, -1))
61957 + goto error;
61958 +
61959 if (ruid != (uid_t) -1) {
61960 new->uid = ruid;
61961 if (ruid != old->uid) {
61962 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
61963 goto error;
61964 }
61965
61966 + if (gr_check_group_change(rgid, egid, -1))
61967 + goto error;
61968 +
61969 if (rgid != (gid_t) -1)
61970 new->gid = rgid;
61971 if (egid != (gid_t) -1)
61972 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
61973 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
61974 goto error;
61975
61976 + if (gr_check_user_change(-1, -1, uid))
61977 + goto error;
61978 +
61979 if (uid == old->uid || uid == old->euid ||
61980 uid == old->suid || uid == old->fsuid ||
61981 capable(CAP_SETUID)) {
61982 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
61983 if (gid == old->gid || gid == old->egid ||
61984 gid == old->sgid || gid == old->fsgid ||
61985 capable(CAP_SETGID)) {
61986 + if (gr_check_group_change(-1, -1, gid))
61987 + goto error;
61988 +
61989 if (gid != old_fsgid) {
61990 new->fsgid = gid;
61991 goto change_okay;
61992 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
61993 error = get_dumpable(me->mm);
61994 break;
61995 case PR_SET_DUMPABLE:
61996 - if (arg2 < 0 || arg2 > 1) {
61997 + if (arg2 > 1) {
61998 error = -EINVAL;
61999 break;
62000 }
62001 diff -urNp linux-2.6.32.41/kernel/sysctl.c linux-2.6.32.41/kernel/sysctl.c
62002 --- linux-2.6.32.41/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62003 +++ linux-2.6.32.41/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62004 @@ -63,6 +63,13 @@
62005 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62006
62007 #if defined(CONFIG_SYSCTL)
62008 +#include <linux/grsecurity.h>
62009 +#include <linux/grinternal.h>
62010 +
62011 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62012 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62013 + const int op);
62014 +extern int gr_handle_chroot_sysctl(const int op);
62015
62016 /* External variables not in a header file. */
62017 extern int C_A_D;
62018 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62019 static int proc_taint(struct ctl_table *table, int write,
62020 void __user *buffer, size_t *lenp, loff_t *ppos);
62021 #endif
62022 +extern ctl_table grsecurity_table[];
62023
62024 static struct ctl_table root_table[];
62025 static struct ctl_table_root sysctl_table_root;
62026 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62027 int sysctl_legacy_va_layout;
62028 #endif
62029
62030 +#ifdef CONFIG_PAX_SOFTMODE
62031 +static ctl_table pax_table[] = {
62032 + {
62033 + .ctl_name = CTL_UNNUMBERED,
62034 + .procname = "softmode",
62035 + .data = &pax_softmode,
62036 + .maxlen = sizeof(unsigned int),
62037 + .mode = 0600,
62038 + .proc_handler = &proc_dointvec,
62039 + },
62040 +
62041 + { .ctl_name = 0 }
62042 +};
62043 +#endif
62044 +
62045 extern int prove_locking;
62046 extern int lock_stat;
62047
62048 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62049 #endif
62050
62051 static struct ctl_table kern_table[] = {
62052 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62053 + {
62054 + .ctl_name = CTL_UNNUMBERED,
62055 + .procname = "grsecurity",
62056 + .mode = 0500,
62057 + .child = grsecurity_table,
62058 + },
62059 +#endif
62060 +
62061 +#ifdef CONFIG_PAX_SOFTMODE
62062 + {
62063 + .ctl_name = CTL_UNNUMBERED,
62064 + .procname = "pax",
62065 + .mode = 0500,
62066 + .child = pax_table,
62067 + },
62068 +#endif
62069 +
62070 {
62071 .ctl_name = CTL_UNNUMBERED,
62072 .procname = "sched_child_runs_first",
62073 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62074 .data = &modprobe_path,
62075 .maxlen = KMOD_PATH_LEN,
62076 .mode = 0644,
62077 - .proc_handler = &proc_dostring,
62078 - .strategy = &sysctl_string,
62079 + .proc_handler = &proc_dostring_modpriv,
62080 + .strategy = &sysctl_string_modpriv,
62081 },
62082 {
62083 .ctl_name = CTL_UNNUMBERED,
62084 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62085 .mode = 0644,
62086 .proc_handler = &proc_dointvec
62087 },
62088 + {
62089 + .procname = "heap_stack_gap",
62090 + .data = &sysctl_heap_stack_gap,
62091 + .maxlen = sizeof(sysctl_heap_stack_gap),
62092 + .mode = 0644,
62093 + .proc_handler = proc_doulongvec_minmax,
62094 + },
62095 #else
62096 {
62097 .ctl_name = CTL_UNNUMBERED,
62098 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62099 return 0;
62100 }
62101
62102 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62103 +
62104 static int parse_table(int __user *name, int nlen,
62105 void __user *oldval, size_t __user *oldlenp,
62106 void __user *newval, size_t newlen,
62107 @@ -1821,7 +1871,7 @@ repeat:
62108 if (n == table->ctl_name) {
62109 int error;
62110 if (table->child) {
62111 - if (sysctl_perm(root, table, MAY_EXEC))
62112 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62113 return -EPERM;
62114 name++;
62115 nlen--;
62116 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62117 int error;
62118 int mode;
62119
62120 + if (table->parent != NULL && table->parent->procname != NULL &&
62121 + table->procname != NULL &&
62122 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62123 + return -EACCES;
62124 + if (gr_handle_chroot_sysctl(op))
62125 + return -EACCES;
62126 + error = gr_handle_sysctl(table, op);
62127 + if (error)
62128 + return error;
62129 +
62130 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62131 + if (error)
62132 + return error;
62133 +
62134 + if (root->permissions)
62135 + mode = root->permissions(root, current->nsproxy, table);
62136 + else
62137 + mode = table->mode;
62138 +
62139 + return test_perm(mode, op);
62140 +}
62141 +
62142 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62143 +{
62144 + int error;
62145 + int mode;
62146 +
62147 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62148 if (error)
62149 return error;
62150 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62151 buffer, lenp, ppos);
62152 }
62153
62154 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62155 + void __user *buffer, size_t *lenp, loff_t *ppos)
62156 +{
62157 + if (write && !capable(CAP_SYS_MODULE))
62158 + return -EPERM;
62159 +
62160 + return _proc_do_string(table->data, table->maxlen, write,
62161 + buffer, lenp, ppos);
62162 +}
62163 +
62164
62165 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62166 int *valp,
62167 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62168 vleft = table->maxlen / sizeof(unsigned long);
62169 left = *lenp;
62170
62171 - for (; left && vleft--; i++, min++, max++, first=0) {
62172 + for (; left && vleft--; i++, first=0) {
62173 if (write) {
62174 while (left) {
62175 char c;
62176 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62177 return -ENOSYS;
62178 }
62179
62180 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62181 + void __user *buffer, size_t *lenp, loff_t *ppos)
62182 +{
62183 + return -ENOSYS;
62184 +}
62185 +
62186 int proc_dointvec(struct ctl_table *table, int write,
62187 void __user *buffer, size_t *lenp, loff_t *ppos)
62188 {
62189 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62190 return 1;
62191 }
62192
62193 +int sysctl_string_modpriv(struct ctl_table *table,
62194 + void __user *oldval, size_t __user *oldlenp,
62195 + void __user *newval, size_t newlen)
62196 +{
62197 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62198 + return -EPERM;
62199 +
62200 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62201 +}
62202 +
62203 /*
62204 * This function makes sure that all of the integers in the vector
62205 * are between the minimum and maximum values given in the arrays
62206 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62207 return -ENOSYS;
62208 }
62209
62210 +int sysctl_string_modpriv(struct ctl_table *table,
62211 + void __user *oldval, size_t __user *oldlenp,
62212 + void __user *newval, size_t newlen)
62213 +{
62214 + return -ENOSYS;
62215 +}
62216 +
62217 int sysctl_intvec(struct ctl_table *table,
62218 void __user *oldval, size_t __user *oldlenp,
62219 void __user *newval, size_t newlen)
62220 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62221 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62222 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62223 EXPORT_SYMBOL(proc_dostring);
62224 +EXPORT_SYMBOL(proc_dostring_modpriv);
62225 EXPORT_SYMBOL(proc_doulongvec_minmax);
62226 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62227 EXPORT_SYMBOL(register_sysctl_table);
62228 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62229 EXPORT_SYMBOL(sysctl_jiffies);
62230 EXPORT_SYMBOL(sysctl_ms_jiffies);
62231 EXPORT_SYMBOL(sysctl_string);
62232 +EXPORT_SYMBOL(sysctl_string_modpriv);
62233 EXPORT_SYMBOL(sysctl_data);
62234 EXPORT_SYMBOL(unregister_sysctl_table);
62235 diff -urNp linux-2.6.32.41/kernel/sysctl_check.c linux-2.6.32.41/kernel/sysctl_check.c
62236 --- linux-2.6.32.41/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62237 +++ linux-2.6.32.41/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62238 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62239 } else {
62240 if ((table->strategy == sysctl_data) ||
62241 (table->strategy == sysctl_string) ||
62242 + (table->strategy == sysctl_string_modpriv) ||
62243 (table->strategy == sysctl_intvec) ||
62244 (table->strategy == sysctl_jiffies) ||
62245 (table->strategy == sysctl_ms_jiffies) ||
62246 (table->proc_handler == proc_dostring) ||
62247 + (table->proc_handler == proc_dostring_modpriv) ||
62248 (table->proc_handler == proc_dointvec) ||
62249 (table->proc_handler == proc_dointvec_minmax) ||
62250 (table->proc_handler == proc_dointvec_jiffies) ||
62251 diff -urNp linux-2.6.32.41/kernel/taskstats.c linux-2.6.32.41/kernel/taskstats.c
62252 --- linux-2.6.32.41/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62253 +++ linux-2.6.32.41/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62254 @@ -26,9 +26,12 @@
62255 #include <linux/cgroup.h>
62256 #include <linux/fs.h>
62257 #include <linux/file.h>
62258 +#include <linux/grsecurity.h>
62259 #include <net/genetlink.h>
62260 #include <asm/atomic.h>
62261
62262 +extern int gr_is_taskstats_denied(int pid);
62263 +
62264 /*
62265 * Maximum length of a cpumask that can be specified in
62266 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62267 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62268 size_t size;
62269 cpumask_var_t mask;
62270
62271 + if (gr_is_taskstats_denied(current->pid))
62272 + return -EACCES;
62273 +
62274 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62275 return -ENOMEM;
62276
62277 diff -urNp linux-2.6.32.41/kernel/time/tick-broadcast.c linux-2.6.32.41/kernel/time/tick-broadcast.c
62278 --- linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62279 +++ linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62280 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62281 * then clear the broadcast bit.
62282 */
62283 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62284 - int cpu = smp_processor_id();
62285 + cpu = smp_processor_id();
62286
62287 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62288 tick_broadcast_clear_oneshot(cpu);
62289 diff -urNp linux-2.6.32.41/kernel/time/timekeeping.c linux-2.6.32.41/kernel/time/timekeeping.c
62290 --- linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 16:56:59.000000000 -0400
62291 +++ linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 19:09:33.000000000 -0400
62292 @@ -14,6 +14,7 @@
62293 #include <linux/init.h>
62294 #include <linux/mm.h>
62295 #include <linux/sched.h>
62296 +#include <linux/grsecurity.h>
62297 #include <linux/sysdev.h>
62298 #include <linux/clocksource.h>
62299 #include <linux/jiffies.h>
62300 @@ -176,7 +177,7 @@ void update_xtime_cache(u64 nsec)
62301 */
62302 struct timespec ts = xtime;
62303 timespec_add_ns(&ts, nsec);
62304 - ACCESS_ONCE(xtime_cache) = ts;
62305 + ACCESS_ONCE_RW(xtime_cache) = ts;
62306 }
62307
62308 /* must hold xtime_lock */
62309 @@ -329,6 +330,8 @@ int do_settimeofday(struct timespec *tv)
62310 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62311 return -EINVAL;
62312
62313 + gr_log_timechange();
62314 +
62315 write_seqlock_irqsave(&xtime_lock, flags);
62316
62317 timekeeping_forward_now();
62318 diff -urNp linux-2.6.32.41/kernel/time/timer_list.c linux-2.6.32.41/kernel/time/timer_list.c
62319 --- linux-2.6.32.41/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62320 +++ linux-2.6.32.41/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62321 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62322
62323 static void print_name_offset(struct seq_file *m, void *sym)
62324 {
62325 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62326 + SEQ_printf(m, "<%p>", NULL);
62327 +#else
62328 char symname[KSYM_NAME_LEN];
62329
62330 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62331 SEQ_printf(m, "<%p>", sym);
62332 else
62333 SEQ_printf(m, "%s", symname);
62334 +#endif
62335 }
62336
62337 static void
62338 @@ -112,7 +116,11 @@ next_one:
62339 static void
62340 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62341 {
62342 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62343 + SEQ_printf(m, " .base: %p\n", NULL);
62344 +#else
62345 SEQ_printf(m, " .base: %p\n", base);
62346 +#endif
62347 SEQ_printf(m, " .index: %d\n",
62348 base->index);
62349 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62350 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62351 {
62352 struct proc_dir_entry *pe;
62353
62354 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62355 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62356 +#else
62357 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62358 +#endif
62359 if (!pe)
62360 return -ENOMEM;
62361 return 0;
62362 diff -urNp linux-2.6.32.41/kernel/time/timer_stats.c linux-2.6.32.41/kernel/time/timer_stats.c
62363 --- linux-2.6.32.41/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62364 +++ linux-2.6.32.41/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62365 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62366 static unsigned long nr_entries;
62367 static struct entry entries[MAX_ENTRIES];
62368
62369 -static atomic_t overflow_count;
62370 +static atomic_unchecked_t overflow_count;
62371
62372 /*
62373 * The entries are in a hash-table, for fast lookup:
62374 @@ -140,7 +140,7 @@ static void reset_entries(void)
62375 nr_entries = 0;
62376 memset(entries, 0, sizeof(entries));
62377 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62378 - atomic_set(&overflow_count, 0);
62379 + atomic_set_unchecked(&overflow_count, 0);
62380 }
62381
62382 static struct entry *alloc_entry(void)
62383 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62384 if (likely(entry))
62385 entry->count++;
62386 else
62387 - atomic_inc(&overflow_count);
62388 + atomic_inc_unchecked(&overflow_count);
62389
62390 out_unlock:
62391 spin_unlock_irqrestore(lock, flags);
62392 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62393
62394 static void print_name_offset(struct seq_file *m, unsigned long addr)
62395 {
62396 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62397 + seq_printf(m, "<%p>", NULL);
62398 +#else
62399 char symname[KSYM_NAME_LEN];
62400
62401 if (lookup_symbol_name(addr, symname) < 0)
62402 seq_printf(m, "<%p>", (void *)addr);
62403 else
62404 seq_printf(m, "%s", symname);
62405 +#endif
62406 }
62407
62408 static int tstats_show(struct seq_file *m, void *v)
62409 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62410
62411 seq_puts(m, "Timer Stats Version: v0.2\n");
62412 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62413 - if (atomic_read(&overflow_count))
62414 + if (atomic_read_unchecked(&overflow_count))
62415 seq_printf(m, "Overflow: %d entries\n",
62416 - atomic_read(&overflow_count));
62417 + atomic_read_unchecked(&overflow_count));
62418
62419 for (i = 0; i < nr_entries; i++) {
62420 entry = entries + i;
62421 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62422 {
62423 struct proc_dir_entry *pe;
62424
62425 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62426 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62427 +#else
62428 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62429 +#endif
62430 if (!pe)
62431 return -ENOMEM;
62432 return 0;
62433 diff -urNp linux-2.6.32.41/kernel/time.c linux-2.6.32.41/kernel/time.c
62434 --- linux-2.6.32.41/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62435 +++ linux-2.6.32.41/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62436 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62437 return error;
62438
62439 if (tz) {
62440 + /* we log in do_settimeofday called below, so don't log twice
62441 + */
62442 + if (!tv)
62443 + gr_log_timechange();
62444 +
62445 /* SMP safe, global irq locking makes it work. */
62446 sys_tz = *tz;
62447 update_vsyscall_tz();
62448 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62449 * Avoid unnecessary multiplications/divisions in the
62450 * two most common HZ cases:
62451 */
62452 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62453 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62454 {
62455 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62456 return (MSEC_PER_SEC / HZ) * j;
62457 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62458 }
62459 EXPORT_SYMBOL(jiffies_to_msecs);
62460
62461 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62462 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62463 {
62464 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62465 return (USEC_PER_SEC / HZ) * j;
62466 diff -urNp linux-2.6.32.41/kernel/timer.c linux-2.6.32.41/kernel/timer.c
62467 --- linux-2.6.32.41/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62468 +++ linux-2.6.32.41/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62469 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62470 /*
62471 * This function runs timers and the timer-tq in bottom half context.
62472 */
62473 -static void run_timer_softirq(struct softirq_action *h)
62474 +static void run_timer_softirq(void)
62475 {
62476 struct tvec_base *base = __get_cpu_var(tvec_bases);
62477
62478 diff -urNp linux-2.6.32.41/kernel/trace/blktrace.c linux-2.6.32.41/kernel/trace/blktrace.c
62479 --- linux-2.6.32.41/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62480 +++ linux-2.6.32.41/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62481 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62482 struct blk_trace *bt = filp->private_data;
62483 char buf[16];
62484
62485 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62486 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62487
62488 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62489 }
62490 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62491 return 1;
62492
62493 bt = buf->chan->private_data;
62494 - atomic_inc(&bt->dropped);
62495 + atomic_inc_unchecked(&bt->dropped);
62496 return 0;
62497 }
62498
62499 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62500
62501 bt->dir = dir;
62502 bt->dev = dev;
62503 - atomic_set(&bt->dropped, 0);
62504 + atomic_set_unchecked(&bt->dropped, 0);
62505
62506 ret = -EIO;
62507 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62508 diff -urNp linux-2.6.32.41/kernel/trace/ftrace.c linux-2.6.32.41/kernel/trace/ftrace.c
62509 --- linux-2.6.32.41/kernel/trace/ftrace.c 2011-03-27 14:31:47.000000000 -0400
62510 +++ linux-2.6.32.41/kernel/trace/ftrace.c 2011-04-17 15:56:46.000000000 -0400
62511 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62512
62513 ip = rec->ip;
62514
62515 + ret = ftrace_arch_code_modify_prepare();
62516 + FTRACE_WARN_ON(ret);
62517 + if (ret)
62518 + return 0;
62519 +
62520 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62521 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62522 if (ret) {
62523 ftrace_bug(ret, ip);
62524 rec->flags |= FTRACE_FL_FAILED;
62525 - return 0;
62526 }
62527 - return 1;
62528 + return ret ? 0 : 1;
62529 }
62530
62531 /*
62532 diff -urNp linux-2.6.32.41/kernel/trace/ring_buffer.c linux-2.6.32.41/kernel/trace/ring_buffer.c
62533 --- linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62534 +++ linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62535 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62536 * the reader page). But if the next page is a header page,
62537 * its flags will be non zero.
62538 */
62539 -static int inline
62540 +static inline int
62541 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62542 struct buffer_page *page, struct list_head *list)
62543 {
62544 diff -urNp linux-2.6.32.41/kernel/trace/trace.c linux-2.6.32.41/kernel/trace/trace.c
62545 --- linux-2.6.32.41/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62546 +++ linux-2.6.32.41/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62547 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62548 size_t rem;
62549 unsigned int i;
62550
62551 + pax_track_stack();
62552 +
62553 /* copy the tracer to avoid using a global lock all around */
62554 mutex_lock(&trace_types_lock);
62555 if (unlikely(old_tracer != current_trace && current_trace)) {
62556 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62557 int entries, size, i;
62558 size_t ret;
62559
62560 + pax_track_stack();
62561 +
62562 if (*ppos & (PAGE_SIZE - 1)) {
62563 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62564 return -EINVAL;
62565 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62566 };
62567 #endif
62568
62569 -static struct dentry *d_tracer;
62570 -
62571 struct dentry *tracing_init_dentry(void)
62572 {
62573 + static struct dentry *d_tracer;
62574 static int once;
62575
62576 if (d_tracer)
62577 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62578 return d_tracer;
62579 }
62580
62581 -static struct dentry *d_percpu;
62582 -
62583 struct dentry *tracing_dentry_percpu(void)
62584 {
62585 + static struct dentry *d_percpu;
62586 static int once;
62587 struct dentry *d_tracer;
62588
62589 diff -urNp linux-2.6.32.41/kernel/trace/trace_events.c linux-2.6.32.41/kernel/trace/trace_events.c
62590 --- linux-2.6.32.41/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62591 +++ linux-2.6.32.41/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62592 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62593 * Modules must own their file_operations to keep up with
62594 * reference counting.
62595 */
62596 +
62597 +/* cannot be const */
62598 struct ftrace_module_file_ops {
62599 struct list_head list;
62600 struct module *mod;
62601 diff -urNp linux-2.6.32.41/kernel/trace/trace_mmiotrace.c linux-2.6.32.41/kernel/trace/trace_mmiotrace.c
62602 --- linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62603 +++ linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62604 @@ -23,7 +23,7 @@ struct header_iter {
62605 static struct trace_array *mmio_trace_array;
62606 static bool overrun_detected;
62607 static unsigned long prev_overruns;
62608 -static atomic_t dropped_count;
62609 +static atomic_unchecked_t dropped_count;
62610
62611 static void mmio_reset_data(struct trace_array *tr)
62612 {
62613 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62614
62615 static unsigned long count_overruns(struct trace_iterator *iter)
62616 {
62617 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62618 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62619 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62620
62621 if (over > prev_overruns)
62622 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62623 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62624 sizeof(*entry), 0, pc);
62625 if (!event) {
62626 - atomic_inc(&dropped_count);
62627 + atomic_inc_unchecked(&dropped_count);
62628 return;
62629 }
62630 entry = ring_buffer_event_data(event);
62631 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62632 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62633 sizeof(*entry), 0, pc);
62634 if (!event) {
62635 - atomic_inc(&dropped_count);
62636 + atomic_inc_unchecked(&dropped_count);
62637 return;
62638 }
62639 entry = ring_buffer_event_data(event);
62640 diff -urNp linux-2.6.32.41/kernel/trace/trace_output.c linux-2.6.32.41/kernel/trace/trace_output.c
62641 --- linux-2.6.32.41/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62642 +++ linux-2.6.32.41/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62643 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62644 return 0;
62645 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62646 if (!IS_ERR(p)) {
62647 - p = mangle_path(s->buffer + s->len, p, "\n");
62648 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62649 if (p) {
62650 s->len = p - s->buffer;
62651 return 1;
62652 diff -urNp linux-2.6.32.41/kernel/trace/trace_stack.c linux-2.6.32.41/kernel/trace/trace_stack.c
62653 --- linux-2.6.32.41/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62654 +++ linux-2.6.32.41/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62655 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62656 return;
62657
62658 /* we do not handle interrupt stacks yet */
62659 - if (!object_is_on_stack(&this_size))
62660 + if (!object_starts_on_stack(&this_size))
62661 return;
62662
62663 local_irq_save(flags);
62664 diff -urNp linux-2.6.32.41/kernel/trace/trace_workqueue.c linux-2.6.32.41/kernel/trace/trace_workqueue.c
62665 --- linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62666 +++ linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62667 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62668 int cpu;
62669 pid_t pid;
62670 /* Can be inserted from interrupt or user context, need to be atomic */
62671 - atomic_t inserted;
62672 + atomic_unchecked_t inserted;
62673 /*
62674 * Don't need to be atomic, works are serialized in a single workqueue thread
62675 * on a single CPU.
62676 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62677 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62678 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62679 if (node->pid == wq_thread->pid) {
62680 - atomic_inc(&node->inserted);
62681 + atomic_inc_unchecked(&node->inserted);
62682 goto found;
62683 }
62684 }
62685 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62686 tsk = get_pid_task(pid, PIDTYPE_PID);
62687 if (tsk) {
62688 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62689 - atomic_read(&cws->inserted), cws->executed,
62690 + atomic_read_unchecked(&cws->inserted), cws->executed,
62691 tsk->comm);
62692 put_task_struct(tsk);
62693 }
62694 diff -urNp linux-2.6.32.41/kernel/user.c linux-2.6.32.41/kernel/user.c
62695 --- linux-2.6.32.41/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62696 +++ linux-2.6.32.41/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62697 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62698 spin_lock_irq(&uidhash_lock);
62699 up = uid_hash_find(uid, hashent);
62700 if (up) {
62701 + put_user_ns(ns);
62702 key_put(new->uid_keyring);
62703 key_put(new->session_keyring);
62704 kmem_cache_free(uid_cachep, new);
62705 diff -urNp linux-2.6.32.41/lib/bug.c linux-2.6.32.41/lib/bug.c
62706 --- linux-2.6.32.41/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62707 +++ linux-2.6.32.41/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62708 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62709 return BUG_TRAP_TYPE_NONE;
62710
62711 bug = find_bug(bugaddr);
62712 + if (!bug)
62713 + return BUG_TRAP_TYPE_NONE;
62714
62715 printk(KERN_EMERG "------------[ cut here ]------------\n");
62716
62717 diff -urNp linux-2.6.32.41/lib/debugobjects.c linux-2.6.32.41/lib/debugobjects.c
62718 --- linux-2.6.32.41/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62719 +++ linux-2.6.32.41/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62720 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62721 if (limit > 4)
62722 return;
62723
62724 - is_on_stack = object_is_on_stack(addr);
62725 + is_on_stack = object_starts_on_stack(addr);
62726 if (is_on_stack == onstack)
62727 return;
62728
62729 diff -urNp linux-2.6.32.41/lib/dma-debug.c linux-2.6.32.41/lib/dma-debug.c
62730 --- linux-2.6.32.41/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62731 +++ linux-2.6.32.41/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62732 @@ -861,7 +861,7 @@ out:
62733
62734 static void check_for_stack(struct device *dev, void *addr)
62735 {
62736 - if (object_is_on_stack(addr))
62737 + if (object_starts_on_stack(addr))
62738 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62739 "stack [addr=%p]\n", addr);
62740 }
62741 diff -urNp linux-2.6.32.41/lib/idr.c linux-2.6.32.41/lib/idr.c
62742 --- linux-2.6.32.41/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62743 +++ linux-2.6.32.41/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62744 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62745 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62746
62747 /* if already at the top layer, we need to grow */
62748 - if (id >= 1 << (idp->layers * IDR_BITS)) {
62749 + if (id >= (1 << (idp->layers * IDR_BITS))) {
62750 *starting_id = id;
62751 return IDR_NEED_TO_GROW;
62752 }
62753 diff -urNp linux-2.6.32.41/lib/inflate.c linux-2.6.32.41/lib/inflate.c
62754 --- linux-2.6.32.41/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62755 +++ linux-2.6.32.41/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62756 @@ -266,7 +266,7 @@ static void free(void *where)
62757 malloc_ptr = free_mem_ptr;
62758 }
62759 #else
62760 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62761 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62762 #define free(a) kfree(a)
62763 #endif
62764
62765 diff -urNp linux-2.6.32.41/lib/Kconfig.debug linux-2.6.32.41/lib/Kconfig.debug
62766 --- linux-2.6.32.41/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62767 +++ linux-2.6.32.41/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62768 @@ -905,7 +905,7 @@ config LATENCYTOP
62769 select STACKTRACE
62770 select SCHEDSTATS
62771 select SCHED_DEBUG
62772 - depends on HAVE_LATENCYTOP_SUPPORT
62773 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62774 help
62775 Enable this option if you want to use the LatencyTOP tool
62776 to find out which userspace is blocking on what kernel operations.
62777 diff -urNp linux-2.6.32.41/lib/kobject.c linux-2.6.32.41/lib/kobject.c
62778 --- linux-2.6.32.41/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62779 +++ linux-2.6.32.41/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62780 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62781 return ret;
62782 }
62783
62784 -struct sysfs_ops kobj_sysfs_ops = {
62785 +const struct sysfs_ops kobj_sysfs_ops = {
62786 .show = kobj_attr_show,
62787 .store = kobj_attr_store,
62788 };
62789 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62790 * If the kset was not able to be created, NULL will be returned.
62791 */
62792 static struct kset *kset_create(const char *name,
62793 - struct kset_uevent_ops *uevent_ops,
62794 + const struct kset_uevent_ops *uevent_ops,
62795 struct kobject *parent_kobj)
62796 {
62797 struct kset *kset;
62798 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62799 * If the kset was not able to be created, NULL will be returned.
62800 */
62801 struct kset *kset_create_and_add(const char *name,
62802 - struct kset_uevent_ops *uevent_ops,
62803 + const struct kset_uevent_ops *uevent_ops,
62804 struct kobject *parent_kobj)
62805 {
62806 struct kset *kset;
62807 diff -urNp linux-2.6.32.41/lib/kobject_uevent.c linux-2.6.32.41/lib/kobject_uevent.c
62808 --- linux-2.6.32.41/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62809 +++ linux-2.6.32.41/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62810 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62811 const char *subsystem;
62812 struct kobject *top_kobj;
62813 struct kset *kset;
62814 - struct kset_uevent_ops *uevent_ops;
62815 + const struct kset_uevent_ops *uevent_ops;
62816 u64 seq;
62817 int i = 0;
62818 int retval = 0;
62819 diff -urNp linux-2.6.32.41/lib/kref.c linux-2.6.32.41/lib/kref.c
62820 --- linux-2.6.32.41/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62821 +++ linux-2.6.32.41/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62822 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62823 */
62824 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62825 {
62826 - WARN_ON(release == NULL);
62827 + BUG_ON(release == NULL);
62828 WARN_ON(release == (void (*)(struct kref *))kfree);
62829
62830 if (atomic_dec_and_test(&kref->refcount)) {
62831 diff -urNp linux-2.6.32.41/lib/parser.c linux-2.6.32.41/lib/parser.c
62832 --- linux-2.6.32.41/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62833 +++ linux-2.6.32.41/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62834 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62835 char *buf;
62836 int ret;
62837
62838 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62839 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62840 if (!buf)
62841 return -ENOMEM;
62842 memcpy(buf, s->from, s->to - s->from);
62843 diff -urNp linux-2.6.32.41/lib/radix-tree.c linux-2.6.32.41/lib/radix-tree.c
62844 --- linux-2.6.32.41/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62845 +++ linux-2.6.32.41/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62846 @@ -81,7 +81,7 @@ struct radix_tree_preload {
62847 int nr;
62848 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62849 };
62850 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62851 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62852
62853 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62854 {
62855 diff -urNp linux-2.6.32.41/lib/random32.c linux-2.6.32.41/lib/random32.c
62856 --- linux-2.6.32.41/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62857 +++ linux-2.6.32.41/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62858 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62859 */
62860 static inline u32 __seed(u32 x, u32 m)
62861 {
62862 - return (x < m) ? x + m : x;
62863 + return (x <= m) ? x + m + 1 : x;
62864 }
62865
62866 /**
62867 diff -urNp linux-2.6.32.41/lib/vsprintf.c linux-2.6.32.41/lib/vsprintf.c
62868 --- linux-2.6.32.41/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62869 +++ linux-2.6.32.41/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62870 @@ -16,6 +16,9 @@
62871 * - scnprintf and vscnprintf
62872 */
62873
62874 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62875 +#define __INCLUDED_BY_HIDESYM 1
62876 +#endif
62877 #include <stdarg.h>
62878 #include <linux/module.h>
62879 #include <linux/types.h>
62880 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62881 return buf;
62882 }
62883
62884 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62885 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62886 {
62887 int len, i;
62888
62889 if ((unsigned long)s < PAGE_SIZE)
62890 - s = "<NULL>";
62891 + s = "(null)";
62892
62893 len = strnlen(s, spec.precision);
62894
62895 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
62896 unsigned long value = (unsigned long) ptr;
62897 #ifdef CONFIG_KALLSYMS
62898 char sym[KSYM_SYMBOL_LEN];
62899 - if (ext != 'f' && ext != 's')
62900 + if (ext != 'f' && ext != 's' && ext != 'a')
62901 sprint_symbol(sym, value);
62902 else
62903 kallsyms_lookup(value, NULL, NULL, NULL, sym);
62904 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
62905 * - 'f' For simple symbolic function names without offset
62906 * - 'S' For symbolic direct pointers with offset
62907 * - 's' For symbolic direct pointers without offset
62908 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
62909 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
62910 * - 'R' For a struct resource pointer, it prints the range of
62911 * addresses (not the name nor the flags)
62912 * - 'M' For a 6-byte MAC address, it prints the address in the
62913 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
62914 struct printf_spec spec)
62915 {
62916 if (!ptr)
62917 - return string(buf, end, "(null)", spec);
62918 + return string(buf, end, "(nil)", spec);
62919
62920 switch (*fmt) {
62921 case 'F':
62922 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
62923 case 's':
62924 /* Fallthrough */
62925 case 'S':
62926 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62927 + break;
62928 +#else
62929 + return symbol_string(buf, end, ptr, spec, *fmt);
62930 +#endif
62931 + case 'a':
62932 + /* Fallthrough */
62933 + case 'A':
62934 return symbol_string(buf, end, ptr, spec, *fmt);
62935 case 'R':
62936 return resource_string(buf, end, ptr, spec);
62937 @@ -1445,7 +1458,7 @@ do { \
62938 size_t len;
62939 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
62940 || (unsigned long)save_str < PAGE_SIZE)
62941 - save_str = "<NULL>";
62942 + save_str = "(null)";
62943 len = strlen(save_str);
62944 if (str + len + 1 < end)
62945 memcpy(str, save_str, len + 1);
62946 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
62947 typeof(type) value; \
62948 if (sizeof(type) == 8) { \
62949 args = PTR_ALIGN(args, sizeof(u32)); \
62950 - *(u32 *)&value = *(u32 *)args; \
62951 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
62952 + *(u32 *)&value = *(const u32 *)args; \
62953 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
62954 } else { \
62955 args = PTR_ALIGN(args, sizeof(type)); \
62956 - value = *(typeof(type) *)args; \
62957 + value = *(const typeof(type) *)args; \
62958 } \
62959 args += sizeof(type); \
62960 value; \
62961 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
62962 const char *str_arg = args;
62963 size_t len = strlen(str_arg);
62964 args += len + 1;
62965 - str = string(str, end, (char *)str_arg, spec);
62966 + str = string(str, end, str_arg, spec);
62967 break;
62968 }
62969
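The vsprintf.c hunks above add 'A'/'a' as HIDESYM-approved variants of the 'S'/'s' symbolic pointer specifiers: with CONFIG_GRKERNSEC_HIDESYM enabled, the ordinary specifiers fall through to plain pointer formatting, while %pA/%pa still resolve through kallsyms. A minimal sketch of a call site follows, assuming a kernel built with this patch applied; the module and message names are illustrative only.

	#include <linux/module.h>
	#include <linux/kernel.h>

	static int __init hidesym_demo_init(void)
	{
		/* Under GRKERNSEC_HIDESYM the %pS argument is no longer
		 * resolved to a symbol name; the %pA argument, added by
		 * the hunk above, still is. */
		pr_info("init at %pS vs %pA\n",
			hidesym_demo_init, hidesym_demo_init);
		return 0;
	}

	static void __exit hidesym_demo_exit(void)
	{
	}

	module_init(hidesym_demo_init);
	module_exit(hidesym_demo_exit);
	MODULE_LICENSE("GPL");

The mm/kmemleak.c hunk later in this patch switches its stack-trace output from %pS to %pA for exactly this reason.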
62970 diff -urNp linux-2.6.32.41/localversion-grsec linux-2.6.32.41/localversion-grsec
62971 --- linux-2.6.32.41/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
62972 +++ linux-2.6.32.41/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
62973 @@ -0,0 +1 @@
62974 +-grsec
62975 diff -urNp linux-2.6.32.41/Makefile linux-2.6.32.41/Makefile
62976 --- linux-2.6.32.41/Makefile 2011-05-23 16:56:59.000000000 -0400
62977 +++ linux-2.6.32.41/Makefile 2011-06-07 18:06:04.000000000 -0400
62978 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
62979
62980 HOSTCC = gcc
62981 HOSTCXX = g++
62982 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
62983 -HOSTCXXFLAGS = -O2
62984 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
62985 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
62986 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
62987
62988 # Decide whether to build built-in, modular, or both.
62989 # Normally, just do built-in.
62990 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
62991 KBUILD_CPPFLAGS := -D__KERNEL__
62992
62993 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
62994 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
62995 -fno-strict-aliasing -fno-common \
62996 -Werror-implicit-function-declaration \
62997 -Wno-format-security \
62998 -fno-delete-null-pointer-checks
62999 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63000 KBUILD_AFLAGS := -D__ASSEMBLY__
63001
63002 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63003 @@ -403,7 +406,7 @@ endif
63004 # of make so .config is not included in this case either (for *config).
63005
63006 no-dot-config-targets := clean mrproper distclean \
63007 - cscope TAGS tags help %docs check% \
63008 + cscope gtags TAGS tags help %docs check% \
63009 include/linux/version.h headers_% \
63010 kernelrelease kernelversion
63011
63012 @@ -644,7 +647,7 @@ export mod_strip_cmd
63013
63014
63015 ifeq ($(KBUILD_EXTMOD),)
63016 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63017 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63018
63019 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63020 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63021 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
63022 # version.h and scripts_basic is processed / created.
63023
63024 # Listed in dependency order
63025 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
63026 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
63027 +
63028 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63029 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63030 +endif
63031 +pax-plugin:
63032 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63033 + $(Q)$(MAKE) $(build)=tools/gcc
63034 +else
63035 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63036 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63037 +endif
63038 +endif
63039
63040 # prepare3 is used to check if we are building in a separate output directory,
63041 # and if so do:
63042 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63043 endif
63044
63045 # prepare2 creates a makefile if using a separate output directory
63046 -prepare2: prepare3 outputmakefile
63047 +prepare2: prepare3 outputmakefile pax-plugin
63048
63049 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63050 include/asm include/config/auto.conf
63051 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63052 include/linux/autoconf.h include/linux/version.h \
63053 include/linux/utsrelease.h \
63054 include/linux/bounds.h include/asm*/asm-offsets.h \
63055 - Module.symvers Module.markers tags TAGS cscope*
63056 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63057
63058 # clean - Delete most, but leave enough to build external modules
63059 #
63060 @@ -1289,6 +1304,7 @@ help:
63061 @echo ' modules_prepare - Set up for building external modules'
63062 @echo ' tags/TAGS - Generate tags file for editors'
63063 @echo ' cscope - Generate cscope index'
63064 + @echo ' gtags - Generate GNU GLOBAL index'
63065 @echo ' kernelrelease - Output the release version string'
63066 @echo ' kernelversion - Output the version stored in Makefile'
63067 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63068 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63069 quiet_cmd_tags = GEN $@
63070 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63071
63072 -tags TAGS cscope: FORCE
63073 +tags TAGS cscope gtags: FORCE
63074 $(call cmd,tags)
63075
63076 # Scripts to check various things for consistency
63077 diff -urNp linux-2.6.32.41/mm/backing-dev.c linux-2.6.32.41/mm/backing-dev.c
63078 --- linux-2.6.32.41/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63079 +++ linux-2.6.32.41/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63080 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63081 * Add the default flusher task that gets created for any bdi
63082 * that has dirty data pending writeout
63083 */
63084 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63085 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63086 {
63087 if (!bdi_cap_writeback_dirty(bdi))
63088 return;
63089 diff -urNp linux-2.6.32.41/mm/filemap.c linux-2.6.32.41/mm/filemap.c
63090 --- linux-2.6.32.41/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63091 +++ linux-2.6.32.41/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63092 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63093 struct address_space *mapping = file->f_mapping;
63094
63095 if (!mapping->a_ops->readpage)
63096 - return -ENOEXEC;
63097 + return -ENODEV;
63098 file_accessed(file);
63099 vma->vm_ops = &generic_file_vm_ops;
63100 vma->vm_flags |= VM_CAN_NONLINEAR;
63101 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63102 *pos = i_size_read(inode);
63103
63104 if (limit != RLIM_INFINITY) {
63105 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63106 if (*pos >= limit) {
63107 send_sig(SIGXFSZ, current, 0);
63108 return -EFBIG;
63109 diff -urNp linux-2.6.32.41/mm/fremap.c linux-2.6.32.41/mm/fremap.c
63110 --- linux-2.6.32.41/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63111 +++ linux-2.6.32.41/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63112 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63113 retry:
63114 vma = find_vma(mm, start);
63115
63116 +#ifdef CONFIG_PAX_SEGMEXEC
63117 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63118 + goto out;
63119 +#endif
63120 +
63121 /*
63122 * Make sure the vma is shared, that it supports prefaulting,
63123 * and that the remapped range is valid and fully within
63124 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63125 /*
63126 * drop PG_Mlocked flag for over-mapped range
63127 */
63128 - unsigned int saved_flags = vma->vm_flags;
63129 + unsigned long saved_flags = vma->vm_flags;
63130 munlock_vma_pages_range(vma, start, start + size);
63131 vma->vm_flags = saved_flags;
63132 }
63133 diff -urNp linux-2.6.32.41/mm/highmem.c linux-2.6.32.41/mm/highmem.c
63134 --- linux-2.6.32.41/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63135 +++ linux-2.6.32.41/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63136 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63137 * So no dangers, even with speculative execution.
63138 */
63139 page = pte_page(pkmap_page_table[i]);
63140 + pax_open_kernel();
63141 pte_clear(&init_mm, (unsigned long)page_address(page),
63142 &pkmap_page_table[i]);
63143 -
63144 + pax_close_kernel();
63145 set_page_address(page, NULL);
63146 need_flush = 1;
63147 }
63148 @@ -177,9 +178,11 @@ start:
63149 }
63150 }
63151 vaddr = PKMAP_ADDR(last_pkmap_nr);
63152 +
63153 + pax_open_kernel();
63154 set_pte_at(&init_mm, vaddr,
63155 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63156 -
63157 + pax_close_kernel();
63158 pkmap_count[last_pkmap_nr] = 1;
63159 set_page_address(page, (void *)vaddr);
63160
63161 diff -urNp linux-2.6.32.41/mm/hugetlb.c linux-2.6.32.41/mm/hugetlb.c
63162 --- linux-2.6.32.41/mm/hugetlb.c 2011-03-27 14:31:47.000000000 -0400
63163 +++ linux-2.6.32.41/mm/hugetlb.c 2011-04-17 15:56:46.000000000 -0400
63164 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63165 return 1;
63166 }
63167
63168 +#ifdef CONFIG_PAX_SEGMEXEC
63169 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63170 +{
63171 + struct mm_struct *mm = vma->vm_mm;
63172 + struct vm_area_struct *vma_m;
63173 + unsigned long address_m;
63174 + pte_t *ptep_m;
63175 +
63176 + vma_m = pax_find_mirror_vma(vma);
63177 + if (!vma_m)
63178 + return;
63179 +
63180 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63181 + address_m = address + SEGMEXEC_TASK_SIZE;
63182 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63183 + get_page(page_m);
63184 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63185 +}
63186 +#endif
63187 +
63188 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63189 unsigned long address, pte_t *ptep, pte_t pte,
63190 struct page *pagecache_page)
63191 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63192 huge_ptep_clear_flush(vma, address, ptep);
63193 set_huge_pte_at(mm, address, ptep,
63194 make_huge_pte(vma, new_page, 1));
63195 +
63196 +#ifdef CONFIG_PAX_SEGMEXEC
63197 + pax_mirror_huge_pte(vma, address, new_page);
63198 +#endif
63199 +
63200 /* Make the old page be freed below */
63201 new_page = old_page;
63202 }
63203 @@ -2127,6 +2152,10 @@ retry:
63204 && (vma->vm_flags & VM_SHARED)));
63205 set_huge_pte_at(mm, address, ptep, new_pte);
63206
63207 +#ifdef CONFIG_PAX_SEGMEXEC
63208 + pax_mirror_huge_pte(vma, address, page);
63209 +#endif
63210 +
63211 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63212 /* Optimization, do the COW without a second fault */
63213 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63214 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63215 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63216 struct hstate *h = hstate_vma(vma);
63217
63218 +#ifdef CONFIG_PAX_SEGMEXEC
63219 + struct vm_area_struct *vma_m;
63220 +
63221 + vma_m = pax_find_mirror_vma(vma);
63222 + if (vma_m) {
63223 + unsigned long address_m;
63224 +
63225 + if (vma->vm_start > vma_m->vm_start) {
63226 + address_m = address;
63227 + address -= SEGMEXEC_TASK_SIZE;
63228 + vma = vma_m;
63229 + h = hstate_vma(vma);
63230 + } else
63231 + address_m = address + SEGMEXEC_TASK_SIZE;
63232 +
63233 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63234 + return VM_FAULT_OOM;
63235 + address_m &= HPAGE_MASK;
63236 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63237 + }
63238 +#endif
63239 +
63240 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63241 if (!ptep)
63242 return VM_FAULT_OOM;
63243 diff -urNp linux-2.6.32.41/mm/Kconfig linux-2.6.32.41/mm/Kconfig
63244 --- linux-2.6.32.41/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63245 +++ linux-2.6.32.41/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63246 @@ -228,7 +228,7 @@ config KSM
63247 config DEFAULT_MMAP_MIN_ADDR
63248 int "Low address space to protect from user allocation"
63249 depends on MMU
63250 - default 4096
63251 + default 65536
63252 help
63253 This is the portion of low virtual memory which should be protected
63254 from userspace allocation. Keeping a user from writing to low pages
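Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the unmappable low region, so a kernel NULL-pointer (or small-offset) dereference cannot be steered into attacker-controlled data. A quick userspace check, assuming vm.mmap_min_addr is set to 65536 (the fixed address and size below are illustrative):

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* A fixed mapping below mmap_min_addr is refused for
		 * unprivileged processes. */
		void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (p == MAP_FAILED)
			printf("mmap at 0x1000 refused: %s\n", strerror(errno));
		else
			printf("mapped at %p (privileged, or mmap_min_addr is low)\n", p);
		return 0;
	}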
63255 diff -urNp linux-2.6.32.41/mm/kmemleak.c linux-2.6.32.41/mm/kmemleak.c
63256 --- linux-2.6.32.41/mm/kmemleak.c 2011-03-27 14:31:47.000000000 -0400
63257 +++ linux-2.6.32.41/mm/kmemleak.c 2011-04-17 15:56:46.000000000 -0400
63258 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63259
63260 for (i = 0; i < object->trace_len; i++) {
63261 void *ptr = (void *)object->trace[i];
63262 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63263 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63264 }
63265 }
63266
63267 diff -urNp linux-2.6.32.41/mm/maccess.c linux-2.6.32.41/mm/maccess.c
63268 --- linux-2.6.32.41/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63269 +++ linux-2.6.32.41/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63270 @@ -14,7 +14,7 @@
63271 * Safely read from address @src to the buffer at @dst. If a kernel fault
63272 * happens, handle that and return -EFAULT.
63273 */
63274 -long probe_kernel_read(void *dst, void *src, size_t size)
63275 +long probe_kernel_read(void *dst, const void *src, size_t size)
63276 {
63277 long ret;
63278 mm_segment_t old_fs = get_fs();
63279 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63280 * Safely write to address @dst from the buffer at @src. If a kernel fault
63281 * happens, handle that and return -EFAULT.
63282 */
63283 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63284 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63285 {
63286 long ret;
63287 mm_segment_t old_fs = get_fs();
63288 diff -urNp linux-2.6.32.41/mm/madvise.c linux-2.6.32.41/mm/madvise.c
63289 --- linux-2.6.32.41/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63290 +++ linux-2.6.32.41/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63291 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63292 pgoff_t pgoff;
63293 unsigned long new_flags = vma->vm_flags;
63294
63295 +#ifdef CONFIG_PAX_SEGMEXEC
63296 + struct vm_area_struct *vma_m;
63297 +#endif
63298 +
63299 switch (behavior) {
63300 case MADV_NORMAL:
63301 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63302 @@ -103,6 +107,13 @@ success:
63303 /*
63304 * vm_flags is protected by the mmap_sem held in write mode.
63305 */
63306 +
63307 +#ifdef CONFIG_PAX_SEGMEXEC
63308 + vma_m = pax_find_mirror_vma(vma);
63309 + if (vma_m)
63310 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63311 +#endif
63312 +
63313 vma->vm_flags = new_flags;
63314
63315 out:
63316 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63317 struct vm_area_struct ** prev,
63318 unsigned long start, unsigned long end)
63319 {
63320 +
63321 +#ifdef CONFIG_PAX_SEGMEXEC
63322 + struct vm_area_struct *vma_m;
63323 +#endif
63324 +
63325 *prev = vma;
63326 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63327 return -EINVAL;
63328 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63329 zap_page_range(vma, start, end - start, &details);
63330 } else
63331 zap_page_range(vma, start, end - start, NULL);
63332 +
63333 +#ifdef CONFIG_PAX_SEGMEXEC
63334 + vma_m = pax_find_mirror_vma(vma);
63335 + if (vma_m) {
63336 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63337 + struct zap_details details = {
63338 + .nonlinear_vma = vma_m,
63339 + .last_index = ULONG_MAX,
63340 + };
63341 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63342 + } else
63343 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63344 + }
63345 +#endif
63346 +
63347 return 0;
63348 }
63349
63350 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63351 if (end < start)
63352 goto out;
63353
63354 +#ifdef CONFIG_PAX_SEGMEXEC
63355 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63356 + if (end > SEGMEXEC_TASK_SIZE)
63357 + goto out;
63358 + } else
63359 +#endif
63360 +
63361 + if (end > TASK_SIZE)
63362 + goto out;
63363 +
63364 error = 0;
63365 if (end == start)
63366 goto out;
63367 diff -urNp linux-2.6.32.41/mm/memory.c linux-2.6.32.41/mm/memory.c
63368 --- linux-2.6.32.41/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63369 +++ linux-2.6.32.41/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63370 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63371 return;
63372
63373 pmd = pmd_offset(pud, start);
63374 +
63375 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63376 pud_clear(pud);
63377 pmd_free_tlb(tlb, pmd, start);
63378 +#endif
63379 +
63380 }
63381
63382 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63383 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63384 if (end - 1 > ceiling - 1)
63385 return;
63386
63387 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63388 pud = pud_offset(pgd, start);
63389 pgd_clear(pgd);
63390 pud_free_tlb(tlb, pud, start);
63391 +#endif
63392 +
63393 }
63394
63395 /*
63396 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63397 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63398 i = 0;
63399
63400 - do {
63401 + while (nr_pages) {
63402 struct vm_area_struct *vma;
63403
63404 - vma = find_extend_vma(mm, start);
63405 + vma = find_vma(mm, start);
63406 if (!vma && in_gate_area(tsk, start)) {
63407 unsigned long pg = start & PAGE_MASK;
63408 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63409 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63410 continue;
63411 }
63412
63413 - if (!vma ||
63414 + if (!vma || start < vma->vm_start ||
63415 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63416 !(vm_flags & vma->vm_flags))
63417 return i ? : -EFAULT;
63418 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63419 start += PAGE_SIZE;
63420 nr_pages--;
63421 } while (nr_pages && start < vma->vm_end);
63422 - } while (nr_pages);
63423 + }
63424 return i;
63425 }
63426
63427 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63428 page_add_file_rmap(page);
63429 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63430
63431 +#ifdef CONFIG_PAX_SEGMEXEC
63432 + pax_mirror_file_pte(vma, addr, page, ptl);
63433 +#endif
63434 +
63435 retval = 0;
63436 pte_unmap_unlock(pte, ptl);
63437 return retval;
63438 @@ -1560,10 +1571,22 @@ out:
63439 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63440 struct page *page)
63441 {
63442 +
63443 +#ifdef CONFIG_PAX_SEGMEXEC
63444 + struct vm_area_struct *vma_m;
63445 +#endif
63446 +
63447 if (addr < vma->vm_start || addr >= vma->vm_end)
63448 return -EFAULT;
63449 if (!page_count(page))
63450 return -EINVAL;
63451 +
63452 +#ifdef CONFIG_PAX_SEGMEXEC
63453 + vma_m = pax_find_mirror_vma(vma);
63454 + if (vma_m)
63455 + vma_m->vm_flags |= VM_INSERTPAGE;
63456 +#endif
63457 +
63458 vma->vm_flags |= VM_INSERTPAGE;
63459 return insert_page(vma, addr, page, vma->vm_page_prot);
63460 }
63461 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63462 unsigned long pfn)
63463 {
63464 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63465 + BUG_ON(vma->vm_mirror);
63466
63467 if (addr < vma->vm_start || addr >= vma->vm_end)
63468 return -EFAULT;
63469 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63470 copy_user_highpage(dst, src, va, vma);
63471 }
63472
63473 +#ifdef CONFIG_PAX_SEGMEXEC
63474 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63475 +{
63476 + struct mm_struct *mm = vma->vm_mm;
63477 + spinlock_t *ptl;
63478 + pte_t *pte, entry;
63479 +
63480 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63481 + entry = *pte;
63482 + if (!pte_present(entry)) {
63483 + if (!pte_none(entry)) {
63484 + BUG_ON(pte_file(entry));
63485 + free_swap_and_cache(pte_to_swp_entry(entry));
63486 + pte_clear_not_present_full(mm, address, pte, 0);
63487 + }
63488 + } else {
63489 + struct page *page;
63490 +
63491 + flush_cache_page(vma, address, pte_pfn(entry));
63492 + entry = ptep_clear_flush(vma, address, pte);
63493 + BUG_ON(pte_dirty(entry));
63494 + page = vm_normal_page(vma, address, entry);
63495 + if (page) {
63496 + update_hiwater_rss(mm);
63497 + if (PageAnon(page))
63498 + dec_mm_counter(mm, anon_rss);
63499 + else
63500 + dec_mm_counter(mm, file_rss);
63501 + page_remove_rmap(page);
63502 + page_cache_release(page);
63503 + }
63504 + }
63505 + pte_unmap_unlock(pte, ptl);
63506 +}
63507 +
63508 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63509 + *
63510 + * the ptl of the lower mapped page is held on entry and is not released on exit
63511 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63512 + */
63513 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63514 +{
63515 + struct mm_struct *mm = vma->vm_mm;
63516 + unsigned long address_m;
63517 + spinlock_t *ptl_m;
63518 + struct vm_area_struct *vma_m;
63519 + pmd_t *pmd_m;
63520 + pte_t *pte_m, entry_m;
63521 +
63522 + BUG_ON(!page_m || !PageAnon(page_m));
63523 +
63524 + vma_m = pax_find_mirror_vma(vma);
63525 + if (!vma_m)
63526 + return;
63527 +
63528 + BUG_ON(!PageLocked(page_m));
63529 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63530 + address_m = address + SEGMEXEC_TASK_SIZE;
63531 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63532 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63533 + ptl_m = pte_lockptr(mm, pmd_m);
63534 + if (ptl != ptl_m) {
63535 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63536 + if (!pte_none(*pte_m))
63537 + goto out;
63538 + }
63539 +
63540 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63541 + page_cache_get(page_m);
63542 + page_add_anon_rmap(page_m, vma_m, address_m);
63543 + inc_mm_counter(mm, anon_rss);
63544 + set_pte_at(mm, address_m, pte_m, entry_m);
63545 + update_mmu_cache(vma_m, address_m, entry_m);
63546 +out:
63547 + if (ptl != ptl_m)
63548 + spin_unlock(ptl_m);
63549 + pte_unmap_nested(pte_m);
63550 + unlock_page(page_m);
63551 +}
63552 +
63553 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63554 +{
63555 + struct mm_struct *mm = vma->vm_mm;
63556 + unsigned long address_m;
63557 + spinlock_t *ptl_m;
63558 + struct vm_area_struct *vma_m;
63559 + pmd_t *pmd_m;
63560 + pte_t *pte_m, entry_m;
63561 +
63562 + BUG_ON(!page_m || PageAnon(page_m));
63563 +
63564 + vma_m = pax_find_mirror_vma(vma);
63565 + if (!vma_m)
63566 + return;
63567 +
63568 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63569 + address_m = address + SEGMEXEC_TASK_SIZE;
63570 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63571 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63572 + ptl_m = pte_lockptr(mm, pmd_m);
63573 + if (ptl != ptl_m) {
63574 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63575 + if (!pte_none(*pte_m))
63576 + goto out;
63577 + }
63578 +
63579 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63580 + page_cache_get(page_m);
63581 + page_add_file_rmap(page_m);
63582 + inc_mm_counter(mm, file_rss);
63583 + set_pte_at(mm, address_m, pte_m, entry_m);
63584 + update_mmu_cache(vma_m, address_m, entry_m);
63585 +out:
63586 + if (ptl != ptl_m)
63587 + spin_unlock(ptl_m);
63588 + pte_unmap_nested(pte_m);
63589 +}
63590 +
63591 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63592 +{
63593 + struct mm_struct *mm = vma->vm_mm;
63594 + unsigned long address_m;
63595 + spinlock_t *ptl_m;
63596 + struct vm_area_struct *vma_m;
63597 + pmd_t *pmd_m;
63598 + pte_t *pte_m, entry_m;
63599 +
63600 + vma_m = pax_find_mirror_vma(vma);
63601 + if (!vma_m)
63602 + return;
63603 +
63604 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63605 + address_m = address + SEGMEXEC_TASK_SIZE;
63606 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63607 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63608 + ptl_m = pte_lockptr(mm, pmd_m);
63609 + if (ptl != ptl_m) {
63610 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63611 + if (!pte_none(*pte_m))
63612 + goto out;
63613 + }
63614 +
63615 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63616 + set_pte_at(mm, address_m, pte_m, entry_m);
63617 +out:
63618 + if (ptl != ptl_m)
63619 + spin_unlock(ptl_m);
63620 + pte_unmap_nested(pte_m);
63621 +}
63622 +
63623 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63624 +{
63625 + struct page *page_m;
63626 + pte_t entry;
63627 +
63628 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63629 + goto out;
63630 +
63631 + entry = *pte;
63632 + page_m = vm_normal_page(vma, address, entry);
63633 + if (!page_m)
63634 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63635 + else if (PageAnon(page_m)) {
63636 + if (pax_find_mirror_vma(vma)) {
63637 + pte_unmap_unlock(pte, ptl);
63638 + lock_page(page_m);
63639 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63640 + if (pte_same(entry, *pte))
63641 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63642 + else
63643 + unlock_page(page_m);
63644 + }
63645 + } else
63646 + pax_mirror_file_pte(vma, address, page_m, ptl);
63647 +
63648 +out:
63649 + pte_unmap_unlock(pte, ptl);
63650 +}
63651 +#endif
63652 +
63653 /*
63654 * This routine handles present pages, when users try to write
63655 * to a shared page. It is done by copying the page to a new address
63656 @@ -2156,6 +2360,12 @@ gotten:
63657 */
63658 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63659 if (likely(pte_same(*page_table, orig_pte))) {
63660 +
63661 +#ifdef CONFIG_PAX_SEGMEXEC
63662 + if (pax_find_mirror_vma(vma))
63663 + BUG_ON(!trylock_page(new_page));
63664 +#endif
63665 +
63666 if (old_page) {
63667 if (!PageAnon(old_page)) {
63668 dec_mm_counter(mm, file_rss);
63669 @@ -2207,6 +2417,10 @@ gotten:
63670 page_remove_rmap(old_page);
63671 }
63672
63673 +#ifdef CONFIG_PAX_SEGMEXEC
63674 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63675 +#endif
63676 +
63677 /* Free the old page.. */
63678 new_page = old_page;
63679 ret |= VM_FAULT_WRITE;
63680 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63681 swap_free(entry);
63682 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63683 try_to_free_swap(page);
63684 +
63685 +#ifdef CONFIG_PAX_SEGMEXEC
63686 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63687 +#endif
63688 +
63689 unlock_page(page);
63690
63691 if (flags & FAULT_FLAG_WRITE) {
63692 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63693
63694 /* No need to invalidate - it was non-present before */
63695 update_mmu_cache(vma, address, pte);
63696 +
63697 +#ifdef CONFIG_PAX_SEGMEXEC
63698 + pax_mirror_anon_pte(vma, address, page, ptl);
63699 +#endif
63700 +
63701 unlock:
63702 pte_unmap_unlock(page_table, ptl);
63703 out:
63704 @@ -2630,40 +2854,6 @@ out_release:
63705 }
63706
63707 /*
63708 - * This is like a special single-page "expand_{down|up}wards()",
63709 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63710 - * doesn't hit another vma.
63711 - */
63712 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63713 -{
63714 - address &= PAGE_MASK;
63715 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63716 - struct vm_area_struct *prev = vma->vm_prev;
63717 -
63718 - /*
63719 - * Is there a mapping abutting this one below?
63720 - *
63721 - * That's only ok if it's the same stack mapping
63722 - * that has gotten split..
63723 - */
63724 - if (prev && prev->vm_end == address)
63725 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63726 -
63727 - expand_stack(vma, address - PAGE_SIZE);
63728 - }
63729 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63730 - struct vm_area_struct *next = vma->vm_next;
63731 -
63732 - /* As VM_GROWSDOWN but s/below/above/ */
63733 - if (next && next->vm_start == address + PAGE_SIZE)
63734 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63735 -
63736 - expand_upwards(vma, address + PAGE_SIZE);
63737 - }
63738 - return 0;
63739 -}
63740 -
63741 -/*
63742 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63743 * but allow concurrent faults), and pte mapped but not yet locked.
63744 * We return with mmap_sem still held, but pte unmapped and unlocked.
63745 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63746 unsigned long address, pte_t *page_table, pmd_t *pmd,
63747 unsigned int flags)
63748 {
63749 - struct page *page;
63750 + struct page *page = NULL;
63751 spinlock_t *ptl;
63752 pte_t entry;
63753
63754 - pte_unmap(page_table);
63755 -
63756 - /* Check if we need to add a guard page to the stack */
63757 - if (check_stack_guard_page(vma, address) < 0)
63758 - return VM_FAULT_SIGBUS;
63759 -
63760 - /* Use the zero-page for reads */
63761 if (!(flags & FAULT_FLAG_WRITE)) {
63762 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63763 vma->vm_page_prot));
63764 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63765 + ptl = pte_lockptr(mm, pmd);
63766 + spin_lock(ptl);
63767 if (!pte_none(*page_table))
63768 goto unlock;
63769 goto setpte;
63770 }
63771
63772 /* Allocate our own private page. */
63773 + pte_unmap(page_table);
63774 +
63775 if (unlikely(anon_vma_prepare(vma)))
63776 goto oom;
63777 page = alloc_zeroed_user_highpage_movable(vma, address);
63778 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63779 if (!pte_none(*page_table))
63780 goto release;
63781
63782 +#ifdef CONFIG_PAX_SEGMEXEC
63783 + if (pax_find_mirror_vma(vma))
63784 + BUG_ON(!trylock_page(page));
63785 +#endif
63786 +
63787 inc_mm_counter(mm, anon_rss);
63788 page_add_new_anon_rmap(page, vma, address);
63789 setpte:
63790 @@ -2718,6 +2909,12 @@ setpte:
63791
63792 /* No need to invalidate - it was non-present before */
63793 update_mmu_cache(vma, address, entry);
63794 +
63795 +#ifdef CONFIG_PAX_SEGMEXEC
63796 + if (page)
63797 + pax_mirror_anon_pte(vma, address, page, ptl);
63798 +#endif
63799 +
63800 unlock:
63801 pte_unmap_unlock(page_table, ptl);
63802 return 0;
63803 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63804 */
63805 /* Only go through if we didn't race with anybody else... */
63806 if (likely(pte_same(*page_table, orig_pte))) {
63807 +
63808 +#ifdef CONFIG_PAX_SEGMEXEC
63809 + if (anon && pax_find_mirror_vma(vma))
63810 + BUG_ON(!trylock_page(page));
63811 +#endif
63812 +
63813 flush_icache_page(vma, page);
63814 entry = mk_pte(page, vma->vm_page_prot);
63815 if (flags & FAULT_FLAG_WRITE)
63816 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63817
63818 /* no need to invalidate: a not-present page won't be cached */
63819 update_mmu_cache(vma, address, entry);
63820 +
63821 +#ifdef CONFIG_PAX_SEGMEXEC
63822 + if (anon)
63823 + pax_mirror_anon_pte(vma, address, page, ptl);
63824 + else
63825 + pax_mirror_file_pte(vma, address, page, ptl);
63826 +#endif
63827 +
63828 } else {
63829 if (charged)
63830 mem_cgroup_uncharge_page(page);
63831 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63832 if (flags & FAULT_FLAG_WRITE)
63833 flush_tlb_page(vma, address);
63834 }
63835 +
63836 +#ifdef CONFIG_PAX_SEGMEXEC
63837 + pax_mirror_pte(vma, address, pte, pmd, ptl);
63838 + return 0;
63839 +#endif
63840 +
63841 unlock:
63842 pte_unmap_unlock(pte, ptl);
63843 return 0;
63844 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63845 pmd_t *pmd;
63846 pte_t *pte;
63847
63848 +#ifdef CONFIG_PAX_SEGMEXEC
63849 + struct vm_area_struct *vma_m;
63850 +#endif
63851 +
63852 __set_current_state(TASK_RUNNING);
63853
63854 count_vm_event(PGFAULT);
63855 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63856 if (unlikely(is_vm_hugetlb_page(vma)))
63857 return hugetlb_fault(mm, vma, address, flags);
63858
63859 +#ifdef CONFIG_PAX_SEGMEXEC
63860 + vma_m = pax_find_mirror_vma(vma);
63861 + if (vma_m) {
63862 + unsigned long address_m;
63863 + pgd_t *pgd_m;
63864 + pud_t *pud_m;
63865 + pmd_t *pmd_m;
63866 +
63867 + if (vma->vm_start > vma_m->vm_start) {
63868 + address_m = address;
63869 + address -= SEGMEXEC_TASK_SIZE;
63870 + vma = vma_m;
63871 + } else
63872 + address_m = address + SEGMEXEC_TASK_SIZE;
63873 +
63874 + pgd_m = pgd_offset(mm, address_m);
63875 + pud_m = pud_alloc(mm, pgd_m, address_m);
63876 + if (!pud_m)
63877 + return VM_FAULT_OOM;
63878 + pmd_m = pmd_alloc(mm, pud_m, address_m);
63879 + if (!pmd_m)
63880 + return VM_FAULT_OOM;
63881 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
63882 + return VM_FAULT_OOM;
63883 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
63884 + }
63885 +#endif
63886 +
63887 pgd = pgd_offset(mm, address);
63888 pud = pud_alloc(mm, pgd, address);
63889 if (!pud)
63890 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
63891 gate_vma.vm_start = FIXADDR_USER_START;
63892 gate_vma.vm_end = FIXADDR_USER_END;
63893 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
63894 - gate_vma.vm_page_prot = __P101;
63895 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
63896 /*
63897 * Make sure the vDSO gets into every core dump.
63898 * Dumping its contents makes post-mortem fully interpretable later
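The mm/memory.c hunks above are the fault-time half of SEGMEXEC mirroring: every executable mapping gets a second vma in the upper half of the user address space (set up in the mm/mmap.c hunks later in this patch), and the pax_mirror_*_pte() helpers keep the mirror's PTE in sync whenever the primary mapping is populated. The sketch below only restates the address arithmetic those helpers rely on; the SEGMEXEC_TASK_SIZE value shown is the conventional i386 split point and is an assumption here, not taken from this hunk.

	/* illustrative only */
	#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 split point */

	/* Addresses in the lower (data) half have their mirror in the
	 * upper (code) half, which is why the helpers above BUG_ON()
	 * addresses at or beyond SEGMEXEC_TASK_SIZE and then operate on
	 * address_m = address + SEGMEXEC_TASK_SIZE. */
	static unsigned long segmexec_mirror(unsigned long address)
	{
		return address + SEGMEXEC_TASK_SIZE;
	}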
63899 diff -urNp linux-2.6.32.41/mm/memory-failure.c linux-2.6.32.41/mm/memory-failure.c
63900 --- linux-2.6.32.41/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
63901 +++ linux-2.6.32.41/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
63902 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
63903
63904 int sysctl_memory_failure_recovery __read_mostly = 1;
63905
63906 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63907 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63908
63909 /*
63910 * Send all the processes who have the page mapped an ``action optional''
63911 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
63912 return 0;
63913 }
63914
63915 - atomic_long_add(1, &mce_bad_pages);
63916 + atomic_long_add_unchecked(1, &mce_bad_pages);
63917
63918 /*
63919 * We need/can do nothing about count=0 pages.
63920 diff -urNp linux-2.6.32.41/mm/mempolicy.c linux-2.6.32.41/mm/mempolicy.c
63921 --- linux-2.6.32.41/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
63922 +++ linux-2.6.32.41/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
63923 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
63924 struct vm_area_struct *next;
63925 int err;
63926
63927 +#ifdef CONFIG_PAX_SEGMEXEC
63928 + struct vm_area_struct *vma_m;
63929 +#endif
63930 +
63931 err = 0;
63932 for (; vma && vma->vm_start < end; vma = next) {
63933 next = vma->vm_next;
63934 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
63935 err = policy_vma(vma, new);
63936 if (err)
63937 break;
63938 +
63939 +#ifdef CONFIG_PAX_SEGMEXEC
63940 + vma_m = pax_find_mirror_vma(vma);
63941 + if (vma_m) {
63942 + err = policy_vma(vma_m, new);
63943 + if (err)
63944 + break;
63945 + }
63946 +#endif
63947 +
63948 }
63949 return err;
63950 }
63951 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
63952
63953 if (end < start)
63954 return -EINVAL;
63955 +
63956 +#ifdef CONFIG_PAX_SEGMEXEC
63957 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
63958 + if (end > SEGMEXEC_TASK_SIZE)
63959 + return -EINVAL;
63960 + } else
63961 +#endif
63962 +
63963 + if (end > TASK_SIZE)
63964 + return -EINVAL;
63965 +
63966 if (end == start)
63967 return 0;
63968
63969 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63970 if (!mm)
63971 return -EINVAL;
63972
63973 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63974 + if (mm != current->mm &&
63975 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63976 + err = -EPERM;
63977 + goto out;
63978 + }
63979 +#endif
63980 +
63981 /*
63982 * Check if this process has the right to modify the specified
63983 * process. The right exists if the process has administrative
63984 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63985 rcu_read_lock();
63986 tcred = __task_cred(task);
63987 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63988 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
63989 - !capable(CAP_SYS_NICE)) {
63990 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63991 rcu_read_unlock();
63992 err = -EPERM;
63993 goto out;
63994 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
63995
63996 if (file) {
63997 seq_printf(m, " file=");
63998 - seq_path(m, &file->f_path, "\n\t= ");
63999 + seq_path(m, &file->f_path, "\n\t\\= ");
64000 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64001 seq_printf(m, " heap");
64002 } else if (vma->vm_start <= mm->start_stack &&
64003 diff -urNp linux-2.6.32.41/mm/migrate.c linux-2.6.32.41/mm/migrate.c
64004 --- linux-2.6.32.41/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64005 +++ linux-2.6.32.41/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64006 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64007 unsigned long chunk_start;
64008 int err;
64009
64010 + pax_track_stack();
64011 +
64012 task_nodes = cpuset_mems_allowed(task);
64013
64014 err = -ENOMEM;
64015 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64016 if (!mm)
64017 return -EINVAL;
64018
64019 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64020 + if (mm != current->mm &&
64021 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64022 + err = -EPERM;
64023 + goto out;
64024 + }
64025 +#endif
64026 +
64027 /*
64028 * Check if this process has the right to modify the specified
64029 * process. The right exists if the process has administrative
64030 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64031 rcu_read_lock();
64032 tcred = __task_cred(task);
64033 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64034 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64035 - !capable(CAP_SYS_NICE)) {
64036 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64037 rcu_read_unlock();
64038 err = -EPERM;
64039 goto out;
64040 diff -urNp linux-2.6.32.41/mm/mlock.c linux-2.6.32.41/mm/mlock.c
64041 --- linux-2.6.32.41/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64042 +++ linux-2.6.32.41/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64043 @@ -13,6 +13,7 @@
64044 #include <linux/pagemap.h>
64045 #include <linux/mempolicy.h>
64046 #include <linux/syscalls.h>
64047 +#include <linux/security.h>
64048 #include <linux/sched.h>
64049 #include <linux/module.h>
64050 #include <linux/rmap.h>
64051 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64052 }
64053 }
64054
64055 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64056 -{
64057 - return (vma->vm_flags & VM_GROWSDOWN) &&
64058 - (vma->vm_start == addr) &&
64059 - !vma_stack_continue(vma->vm_prev, addr);
64060 -}
64061 -
64062 /**
64063 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64064 * @vma: target vma
64065 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64066 if (vma->vm_flags & VM_WRITE)
64067 gup_flags |= FOLL_WRITE;
64068
64069 - /* We don't try to access the guard page of a stack vma */
64070 - if (stack_guard_page(vma, start)) {
64071 - addr += PAGE_SIZE;
64072 - nr_pages--;
64073 - }
64074 -
64075 while (nr_pages > 0) {
64076 int i;
64077
64078 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64079 {
64080 unsigned long nstart, end, tmp;
64081 struct vm_area_struct * vma, * prev;
64082 - int error;
64083 + int error = -EINVAL;
64084
64085 len = PAGE_ALIGN(len);
64086 end = start + len;
64087 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64088 return -EINVAL;
64089 if (end == start)
64090 return 0;
64091 + if (end > TASK_SIZE)
64092 + return -EINVAL;
64093 +
64094 vma = find_vma_prev(current->mm, start, &prev);
64095 if (!vma || vma->vm_start > start)
64096 return -ENOMEM;
64097 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64098 for (nstart = start ; ; ) {
64099 unsigned int newflags;
64100
64101 +#ifdef CONFIG_PAX_SEGMEXEC
64102 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64103 + break;
64104 +#endif
64105 +
64106 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64107
64108 newflags = vma->vm_flags | VM_LOCKED;
64109 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64110 lock_limit >>= PAGE_SHIFT;
64111
64112 /* check against resource limits */
64113 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64114 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64115 error = do_mlock(start, len, 1);
64116 up_write(&current->mm->mmap_sem);
64117 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64118 static int do_mlockall(int flags)
64119 {
64120 struct vm_area_struct * vma, * prev = NULL;
64121 - unsigned int def_flags = 0;
64122
64123 if (flags & MCL_FUTURE)
64124 - def_flags = VM_LOCKED;
64125 - current->mm->def_flags = def_flags;
64126 + current->mm->def_flags |= VM_LOCKED;
64127 + else
64128 + current->mm->def_flags &= ~VM_LOCKED;
64129 if (flags == MCL_FUTURE)
64130 goto out;
64131
64132 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64133 - unsigned int newflags;
64134 + unsigned long newflags;
64135 +
64136 +#ifdef CONFIG_PAX_SEGMEXEC
64137 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64138 + break;
64139 +#endif
64140
64141 + BUG_ON(vma->vm_end > TASK_SIZE);
64142 newflags = vma->vm_flags | VM_LOCKED;
64143 if (!(flags & MCL_CURRENT))
64144 newflags &= ~VM_LOCKED;
64145 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64146 lock_limit >>= PAGE_SHIFT;
64147
64148 ret = -ENOMEM;
64149 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64150 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64151 capable(CAP_IPC_LOCK))
64152 ret = do_mlockall(flags);
64153 diff -urNp linux-2.6.32.41/mm/mmap.c linux-2.6.32.41/mm/mmap.c
64154 --- linux-2.6.32.41/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64155 +++ linux-2.6.32.41/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64156 @@ -45,6 +45,16 @@
64157 #define arch_rebalance_pgtables(addr, len) (addr)
64158 #endif
64159
64160 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64161 +{
64162 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64163 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64164 + up_read(&mm->mmap_sem);
64165 + BUG();
64166 + }
64167 +#endif
64168 +}
64169 +
64170 static void unmap_region(struct mm_struct *mm,
64171 struct vm_area_struct *vma, struct vm_area_struct *prev,
64172 unsigned long start, unsigned long end);
64173 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64174 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64175 *
64176 */
64177 -pgprot_t protection_map[16] = {
64178 +pgprot_t protection_map[16] __read_only = {
64179 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64180 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64181 };
64182
64183 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64184 {
64185 - return __pgprot(pgprot_val(protection_map[vm_flags &
64186 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64187 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64188 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64189 +
64190 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64191 + if (!nx_enabled &&
64192 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64193 + (vm_flags & (VM_READ | VM_WRITE)))
64194 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64195 +#endif
64196 +
64197 + return prot;
64198 }
64199 EXPORT_SYMBOL(vm_get_page_prot);
64200
64201 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64202 int sysctl_overcommit_ratio = 50; /* default is 50% */
64203 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64204 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64205 struct percpu_counter vm_committed_as;
64206
64207 /*
64208 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64209 struct vm_area_struct *next = vma->vm_next;
64210
64211 might_sleep();
64212 + BUG_ON(vma->vm_mirror);
64213 if (vma->vm_ops && vma->vm_ops->close)
64214 vma->vm_ops->close(vma);
64215 if (vma->vm_file) {
64216 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64217 * not page aligned -Ram Gupta
64218 */
64219 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64220 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64221 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64222 (mm->end_data - mm->start_data) > rlim)
64223 goto out;
64224 @@ -704,6 +726,12 @@ static int
64225 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64226 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64227 {
64228 +
64229 +#ifdef CONFIG_PAX_SEGMEXEC
64230 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64231 + return 0;
64232 +#endif
64233 +
64234 if (is_mergeable_vma(vma, file, vm_flags) &&
64235 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64236 if (vma->vm_pgoff == vm_pgoff)
64237 @@ -723,6 +751,12 @@ static int
64238 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64239 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64240 {
64241 +
64242 +#ifdef CONFIG_PAX_SEGMEXEC
64243 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64244 + return 0;
64245 +#endif
64246 +
64247 if (is_mergeable_vma(vma, file, vm_flags) &&
64248 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64249 pgoff_t vm_pglen;
64250 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64251 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64252 struct vm_area_struct *prev, unsigned long addr,
64253 unsigned long end, unsigned long vm_flags,
64254 - struct anon_vma *anon_vma, struct file *file,
64255 + struct anon_vma *anon_vma, struct file *file,
64256 pgoff_t pgoff, struct mempolicy *policy)
64257 {
64258 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64259 struct vm_area_struct *area, *next;
64260
64261 +#ifdef CONFIG_PAX_SEGMEXEC
64262 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64263 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64264 +
64265 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64266 +#endif
64267 +
64268 /*
64269 * We later require that vma->vm_flags == vm_flags,
64270 * so this tests vma->vm_flags & VM_SPECIAL, too.
64271 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64272 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64273 next = next->vm_next;
64274
64275 +#ifdef CONFIG_PAX_SEGMEXEC
64276 + if (prev)
64277 + prev_m = pax_find_mirror_vma(prev);
64278 + if (area)
64279 + area_m = pax_find_mirror_vma(area);
64280 + if (next)
64281 + next_m = pax_find_mirror_vma(next);
64282 +#endif
64283 +
64284 /*
64285 * Can it merge with the predecessor?
64286 */
64287 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64288 /* cases 1, 6 */
64289 vma_adjust(prev, prev->vm_start,
64290 next->vm_end, prev->vm_pgoff, NULL);
64291 - } else /* cases 2, 5, 7 */
64292 +
64293 +#ifdef CONFIG_PAX_SEGMEXEC
64294 + if (prev_m)
64295 + vma_adjust(prev_m, prev_m->vm_start,
64296 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64297 +#endif
64298 +
64299 + } else { /* cases 2, 5, 7 */
64300 vma_adjust(prev, prev->vm_start,
64301 end, prev->vm_pgoff, NULL);
64302 +
64303 +#ifdef CONFIG_PAX_SEGMEXEC
64304 + if (prev_m)
64305 + vma_adjust(prev_m, prev_m->vm_start,
64306 + end_m, prev_m->vm_pgoff, NULL);
64307 +#endif
64308 +
64309 + }
64310 return prev;
64311 }
64312
64313 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64314 mpol_equal(policy, vma_policy(next)) &&
64315 can_vma_merge_before(next, vm_flags,
64316 anon_vma, file, pgoff+pglen)) {
64317 - if (prev && addr < prev->vm_end) /* case 4 */
64318 + if (prev && addr < prev->vm_end) { /* case 4 */
64319 vma_adjust(prev, prev->vm_start,
64320 addr, prev->vm_pgoff, NULL);
64321 - else /* cases 3, 8 */
64322 +
64323 +#ifdef CONFIG_PAX_SEGMEXEC
64324 + if (prev_m)
64325 + vma_adjust(prev_m, prev_m->vm_start,
64326 + addr_m, prev_m->vm_pgoff, NULL);
64327 +#endif
64328 +
64329 + } else { /* cases 3, 8 */
64330 vma_adjust(area, addr, next->vm_end,
64331 next->vm_pgoff - pglen, NULL);
64332 +
64333 +#ifdef CONFIG_PAX_SEGMEXEC
64334 + if (area_m)
64335 + vma_adjust(area_m, addr_m, next_m->vm_end,
64336 + next_m->vm_pgoff - pglen, NULL);
64337 +#endif
64338 +
64339 + }
64340 return area;
64341 }
64342
64343 @@ -898,14 +978,11 @@ none:
64344 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64345 struct file *file, long pages)
64346 {
64347 - const unsigned long stack_flags
64348 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64349 -
64350 if (file) {
64351 mm->shared_vm += pages;
64352 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64353 mm->exec_vm += pages;
64354 - } else if (flags & stack_flags)
64355 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64356 mm->stack_vm += pages;
64357 if (flags & (VM_RESERVED|VM_IO))
64358 mm->reserved_vm += pages;
64359 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64360 * (the exception is when the underlying filesystem is noexec
64361 * mounted, in which case we dont add PROT_EXEC.)
64362 */
64363 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64364 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64365 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64366 prot |= PROT_EXEC;
64367
64368 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64369 /* Obtain the address to map to. we verify (or select) it and ensure
64370 * that it represents a valid section of the address space.
64371 */
64372 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64373 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64374 if (addr & ~PAGE_MASK)
64375 return addr;
64376
64377 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64378 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64379 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64380
64381 +#ifdef CONFIG_PAX_MPROTECT
64382 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64383 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64384 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64385 + gr_log_rwxmmap(file);
64386 +
64387 +#ifdef CONFIG_PAX_EMUPLT
64388 + vm_flags &= ~VM_EXEC;
64389 +#else
64390 + return -EPERM;
64391 +#endif
64392 +
64393 + }
64394 +
64395 + if (!(vm_flags & VM_EXEC))
64396 + vm_flags &= ~VM_MAYEXEC;
64397 +#else
64398 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64399 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64400 +#endif
64401 + else
64402 + vm_flags &= ~VM_MAYWRITE;
64403 + }
64404 +#endif
64405 +
64406 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64407 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64408 + vm_flags &= ~VM_PAGEEXEC;
64409 +#endif
64410 +
64411 if (flags & MAP_LOCKED)
64412 if (!can_do_mlock())
64413 return -EPERM;
64414 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64415 locked += mm->locked_vm;
64416 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64417 lock_limit >>= PAGE_SHIFT;
64418 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64419 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64420 return -EAGAIN;
64421 }
64422 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64423 if (error)
64424 return error;
64425
64426 + if (!gr_acl_handle_mmap(file, prot))
64427 + return -EACCES;
64428 +
64429 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64430 }
64431 EXPORT_SYMBOL(do_mmap_pgoff);
64432 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64433 */
64434 int vma_wants_writenotify(struct vm_area_struct *vma)
64435 {
64436 - unsigned int vm_flags = vma->vm_flags;
64437 + unsigned long vm_flags = vma->vm_flags;
64438
64439 /* If it was private or non-writable, the write bit is already clear */
64440 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64441 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64442 return 0;
64443
64444 /* The backer wishes to know when pages are first written to? */
64445 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64446 unsigned long charged = 0;
64447 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64448
64449 +#ifdef CONFIG_PAX_SEGMEXEC
64450 + struct vm_area_struct *vma_m = NULL;
64451 +#endif
64452 +
64453 + /*
64454 + * mm->mmap_sem is required to protect against another thread
64455 + * changing the mappings in case we sleep.
64456 + */
64457 + verify_mm_writelocked(mm);
64458 +
64459 /* Clear old maps */
64460 error = -ENOMEM;
64461 -munmap_back:
64462 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64463 if (vma && vma->vm_start < addr + len) {
64464 if (do_munmap(mm, addr, len))
64465 return -ENOMEM;
64466 - goto munmap_back;
64467 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64468 + BUG_ON(vma && vma->vm_start < addr + len);
64469 }
64470
64471 /* Check against address space limit. */
64472 @@ -1173,6 +1294,16 @@ munmap_back:
64473 goto unacct_error;
64474 }
64475
64476 +#ifdef CONFIG_PAX_SEGMEXEC
64477 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64478 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64479 + if (!vma_m) {
64480 + error = -ENOMEM;
64481 + goto free_vma;
64482 + }
64483 + }
64484 +#endif
64485 +
64486 vma->vm_mm = mm;
64487 vma->vm_start = addr;
64488 vma->vm_end = addr + len;
64489 @@ -1195,6 +1326,19 @@ munmap_back:
64490 error = file->f_op->mmap(file, vma);
64491 if (error)
64492 goto unmap_and_free_vma;
64493 +
64494 +#ifdef CONFIG_PAX_SEGMEXEC
64495 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64496 + added_exe_file_vma(mm);
64497 +#endif
64498 +
64499 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64500 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64501 + vma->vm_flags |= VM_PAGEEXEC;
64502 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64503 + }
64504 +#endif
64505 +
64506 if (vm_flags & VM_EXECUTABLE)
64507 added_exe_file_vma(mm);
64508
64509 @@ -1218,6 +1362,11 @@ munmap_back:
64510 vma_link(mm, vma, prev, rb_link, rb_parent);
64511 file = vma->vm_file;
64512
64513 +#ifdef CONFIG_PAX_SEGMEXEC
64514 + if (vma_m)
64515 + pax_mirror_vma(vma_m, vma);
64516 +#endif
64517 +
64518 /* Once vma denies write, undo our temporary denial count */
64519 if (correct_wcount)
64520 atomic_inc(&inode->i_writecount);
64521 @@ -1226,6 +1375,7 @@ out:
64522
64523 mm->total_vm += len >> PAGE_SHIFT;
64524 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64525 + track_exec_limit(mm, addr, addr + len, vm_flags);
64526 if (vm_flags & VM_LOCKED) {
64527 /*
64528 * makes pages present; downgrades, drops, reacquires mmap_sem
64529 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64530 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64531 charged = 0;
64532 free_vma:
64533 +
64534 +#ifdef CONFIG_PAX_SEGMEXEC
64535 + if (vma_m)
64536 + kmem_cache_free(vm_area_cachep, vma_m);
64537 +#endif
64538 +
64539 kmem_cache_free(vm_area_cachep, vma);
64540 unacct_error:
64541 if (charged)
64542 @@ -1255,6 +1411,44 @@ unacct_error:
64543 return error;
64544 }
64545
64546 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64547 +{
64548 + if (!vma) {
64549 +#ifdef CONFIG_STACK_GROWSUP
64550 + if (addr > sysctl_heap_stack_gap)
64551 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64552 + else
64553 + vma = find_vma(current->mm, 0);
64554 + if (vma && (vma->vm_flags & VM_GROWSUP))
64555 + return false;
64556 +#endif
64557 + return true;
64558 + }
64559 +
64560 + if (addr + len > vma->vm_start)
64561 + return false;
64562 +
64563 + if (vma->vm_flags & VM_GROWSDOWN)
64564 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64565 +#ifdef CONFIG_STACK_GROWSUP
64566 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64567 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64568 +#endif
64569 +
64570 + return true;
64571 +}
64572 +
64573 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64574 +{
64575 + if (vma->vm_start < len)
64576 + return -ENOMEM;
64577 + if (!(vma->vm_flags & VM_GROWSDOWN))
64578 + return vma->vm_start - len;
64579 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64580 + return vma->vm_start - len - sysctl_heap_stack_gap;
64581 + return -ENOMEM;
64582 +}
64583 +
64584 /* Get an address range which is currently unmapped.
64585 * For shmat() with addr=0.
64586 *
64587 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64588 if (flags & MAP_FIXED)
64589 return addr;
64590
64591 +#ifdef CONFIG_PAX_RANDMMAP
64592 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64593 +#endif
64594 +
64595 if (addr) {
64596 addr = PAGE_ALIGN(addr);
64597 - vma = find_vma(mm, addr);
64598 - if (TASK_SIZE - len >= addr &&
64599 - (!vma || addr + len <= vma->vm_start))
64600 - return addr;
64601 + if (TASK_SIZE - len >= addr) {
64602 + vma = find_vma(mm, addr);
64603 + if (check_heap_stack_gap(vma, addr, len))
64604 + return addr;
64605 + }
64606 }
64607 if (len > mm->cached_hole_size) {
64608 - start_addr = addr = mm->free_area_cache;
64609 + start_addr = addr = mm->free_area_cache;
64610 } else {
64611 - start_addr = addr = TASK_UNMAPPED_BASE;
64612 - mm->cached_hole_size = 0;
64613 + start_addr = addr = mm->mmap_base;
64614 + mm->cached_hole_size = 0;
64615 }
64616
64617 full_search:
64618 @@ -1303,34 +1502,40 @@ full_search:
64619 * Start a new search - just in case we missed
64620 * some holes.
64621 */
64622 - if (start_addr != TASK_UNMAPPED_BASE) {
64623 - addr = TASK_UNMAPPED_BASE;
64624 - start_addr = addr;
64625 + if (start_addr != mm->mmap_base) {
64626 + start_addr = addr = mm->mmap_base;
64627 mm->cached_hole_size = 0;
64628 goto full_search;
64629 }
64630 return -ENOMEM;
64631 }
64632 - if (!vma || addr + len <= vma->vm_start) {
64633 - /*
64634 - * Remember the place where we stopped the search:
64635 - */
64636 - mm->free_area_cache = addr + len;
64637 - return addr;
64638 - }
64639 + if (check_heap_stack_gap(vma, addr, len))
64640 + break;
64641 if (addr + mm->cached_hole_size < vma->vm_start)
64642 mm->cached_hole_size = vma->vm_start - addr;
64643 addr = vma->vm_end;
64644 }
64645 +
64646 + /*
64647 + * Remember the place where we stopped the search:
64648 + */
64649 + mm->free_area_cache = addr + len;
64650 + return addr;
64651 }
64652 #endif
64653
64654 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64655 {
64656 +
64657 +#ifdef CONFIG_PAX_SEGMEXEC
64658 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64659 + return;
64660 +#endif
64661 +
64662 /*
64663 * Is this a new hole at the lowest possible address?
64664 */
64665 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64666 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64667 mm->free_area_cache = addr;
64668 mm->cached_hole_size = ~0UL;
64669 }
64670 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64671 {
64672 struct vm_area_struct *vma;
64673 struct mm_struct *mm = current->mm;
64674 - unsigned long addr = addr0;
64675 + unsigned long base = mm->mmap_base, addr = addr0;
64676
64677 /* requested length too big for entire address space */
64678 if (len > TASK_SIZE)
64679 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64680 if (flags & MAP_FIXED)
64681 return addr;
64682
64683 +#ifdef CONFIG_PAX_RANDMMAP
64684 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64685 +#endif
64686 +
64687 /* requesting a specific address */
64688 if (addr) {
64689 addr = PAGE_ALIGN(addr);
64690 - vma = find_vma(mm, addr);
64691 - if (TASK_SIZE - len >= addr &&
64692 - (!vma || addr + len <= vma->vm_start))
64693 - return addr;
64694 + if (TASK_SIZE - len >= addr) {
64695 + vma = find_vma(mm, addr);
64696 + if (check_heap_stack_gap(vma, addr, len))
64697 + return addr;
64698 + }
64699 }
64700
64701 /* check if free_area_cache is useful for us */
64702 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64703 /* make sure it can fit in the remaining address space */
64704 if (addr > len) {
64705 vma = find_vma(mm, addr-len);
64706 - if (!vma || addr <= vma->vm_start)
64707 + if (check_heap_stack_gap(vma, addr - len, len))
64708 /* remember the address as a hint for next time */
64709 return (mm->free_area_cache = addr-len);
64710 }
64711 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64712 * return with success:
64713 */
64714 vma = find_vma(mm, addr);
64715 - if (!vma || addr+len <= vma->vm_start)
64716 + if (check_heap_stack_gap(vma, addr, len))
64717 /* remember the address as a hint for next time */
64718 return (mm->free_area_cache = addr);
64719
64720 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64721 mm->cached_hole_size = vma->vm_start - addr;
64722
64723 /* try just below the current vma->vm_start */
64724 - addr = vma->vm_start-len;
64725 - } while (len < vma->vm_start);
64726 + addr = skip_heap_stack_gap(vma, len);
64727 + } while (!IS_ERR_VALUE(addr));
64728
64729 bottomup:
64730 /*
64731 @@ -1414,13 +1624,21 @@ bottomup:
64732 * can happen with large stack limits and large mmap()
64733 * allocations.
64734 */
64735 + mm->mmap_base = TASK_UNMAPPED_BASE;
64736 +
64737 +#ifdef CONFIG_PAX_RANDMMAP
64738 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64739 + mm->mmap_base += mm->delta_mmap;
64740 +#endif
64741 +
64742 + mm->free_area_cache = mm->mmap_base;
64743 mm->cached_hole_size = ~0UL;
64744 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64745 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64746 /*
64747 * Restore the topdown base:
64748 */
64749 - mm->free_area_cache = mm->mmap_base;
64750 + mm->mmap_base = base;
64751 + mm->free_area_cache = base;
64752 mm->cached_hole_size = ~0UL;
64753
64754 return addr;
64755 @@ -1429,6 +1647,12 @@ bottomup:
64756
64757 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64758 {
64759 +
64760 +#ifdef CONFIG_PAX_SEGMEXEC
64761 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64762 + return;
64763 +#endif
64764 +
64765 /*
64766 * Is this a new hole at the highest possible address?
64767 */
64768 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64769 mm->free_area_cache = addr;
64770
64771 /* dont allow allocations above current base */
64772 - if (mm->free_area_cache > mm->mmap_base)
64773 + if (mm->free_area_cache > mm->mmap_base) {
64774 mm->free_area_cache = mm->mmap_base;
64775 + mm->cached_hole_size = ~0UL;
64776 + }
64777 }
64778
64779 unsigned long
64780 @@ -1545,6 +1771,27 @@ out:
64781 return prev ? prev->vm_next : vma;
64782 }
64783
64784 +#ifdef CONFIG_PAX_SEGMEXEC
64785 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64786 +{
64787 + struct vm_area_struct *vma_m;
64788 +
64789 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64790 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64791 + BUG_ON(vma->vm_mirror);
64792 + return NULL;
64793 + }
64794 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64795 + vma_m = vma->vm_mirror;
64796 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64797 + BUG_ON(vma->vm_file != vma_m->vm_file);
64798 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64799 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64800 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64801 + return vma_m;
64802 +}
64803 +#endif
64804 +
64805 /*
64806 * Verify that the stack growth is acceptable and
64807 * update accounting. This is shared with both the
64808 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64809 return -ENOMEM;
64810
64811 /* Stack limit test */
64812 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
64813 if (size > rlim[RLIMIT_STACK].rlim_cur)
64814 return -ENOMEM;
64815
64816 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64817 unsigned long limit;
64818 locked = mm->locked_vm + grow;
64819 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64820 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64821 if (locked > limit && !capable(CAP_IPC_LOCK))
64822 return -ENOMEM;
64823 }
64824 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64825 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64826 * vma is the last one with address > vma->vm_end. Have to extend vma.
64827 */
64828 +#ifndef CONFIG_IA64
64829 +static
64830 +#endif
64831 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64832 {
64833 int error;
64834 + bool locknext;
64835
64836 if (!(vma->vm_flags & VM_GROWSUP))
64837 return -EFAULT;
64838
64839 + /* Also guard against wrapping around to address 0. */
64840 + if (address < PAGE_ALIGN(address+1))
64841 + address = PAGE_ALIGN(address+1);
64842 + else
64843 + return -ENOMEM;
64844 +
64845 /*
64846 * We must make sure the anon_vma is allocated
64847 * so that the anon_vma locking is not a noop.
64848 */
64849 if (unlikely(anon_vma_prepare(vma)))
64850 return -ENOMEM;
64851 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64852 + if (locknext && anon_vma_prepare(vma->vm_next))
64853 + return -ENOMEM;
64854 anon_vma_lock(vma);
64855 + if (locknext)
64856 + anon_vma_lock(vma->vm_next);
64857
64858 /*
64859 * vma->vm_start/vm_end cannot change under us because the caller
64860 * is required to hold the mmap_sem in read mode. We need the
64861 - * anon_vma lock to serialize against concurrent expand_stacks.
64862 - * Also guard against wrapping around to address 0.
64863 + * anon_vma locks to serialize against concurrent expand_stacks
64864 + * and expand_upwards.
64865 */
64866 - if (address < PAGE_ALIGN(address+4))
64867 - address = PAGE_ALIGN(address+4);
64868 - else {
64869 - anon_vma_unlock(vma);
64870 - return -ENOMEM;
64871 - }
64872 error = 0;
64873
64874 /* Somebody else might have raced and expanded it already */
64875 - if (address > vma->vm_end) {
64876 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64877 + error = -ENOMEM;
64878 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
64879 unsigned long size, grow;
64880
64881 size = address - vma->vm_start;
64882 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
64883 if (!error)
64884 vma->vm_end = address;
64885 }
64886 + if (locknext)
64887 + anon_vma_unlock(vma->vm_next);
64888 anon_vma_unlock(vma);
64889 return error;
64890 }
64891 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
64892 unsigned long address)
64893 {
64894 int error;
64895 + bool lockprev = false;
64896 + struct vm_area_struct *prev;
64897
64898 /*
64899 * We must make sure the anon_vma is allocated
64900 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
64901 if (error)
64902 return error;
64903
64904 + prev = vma->vm_prev;
64905 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
64906 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
64907 +#endif
64908 + if (lockprev && anon_vma_prepare(prev))
64909 + return -ENOMEM;
64910 + if (lockprev)
64911 + anon_vma_lock(prev);
64912 +
64913 anon_vma_lock(vma);
64914
64915 /*
64916 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
64917 */
64918
64919 /* Somebody else might have raced and expanded it already */
64920 - if (address < vma->vm_start) {
64921 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
64922 + error = -ENOMEM;
64923 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
64924 unsigned long size, grow;
64925
64926 +#ifdef CONFIG_PAX_SEGMEXEC
64927 + struct vm_area_struct *vma_m;
64928 +
64929 + vma_m = pax_find_mirror_vma(vma);
64930 +#endif
64931 +
64932 size = vma->vm_end - address;
64933 grow = (vma->vm_start - address) >> PAGE_SHIFT;
64934
64935 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
64936 if (!error) {
64937 vma->vm_start = address;
64938 vma->vm_pgoff -= grow;
64939 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
64940 +
64941 +#ifdef CONFIG_PAX_SEGMEXEC
64942 + if (vma_m) {
64943 + vma_m->vm_start -= grow << PAGE_SHIFT;
64944 + vma_m->vm_pgoff -= grow;
64945 + }
64946 +#endif
64947 +
64948 }
64949 }
64950 anon_vma_unlock(vma);
64951 + if (lockprev)
64952 + anon_vma_unlock(prev);
64953 return error;
64954 }
64955
64956 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
64957 do {
64958 long nrpages = vma_pages(vma);
64959
64960 +#ifdef CONFIG_PAX_SEGMEXEC
64961 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
64962 + vma = remove_vma(vma);
64963 + continue;
64964 + }
64965 +#endif
64966 +
64967 mm->total_vm -= nrpages;
64968 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
64969 vma = remove_vma(vma);
64970 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
64971 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
64972 vma->vm_prev = NULL;
64973 do {
64974 +
64975 +#ifdef CONFIG_PAX_SEGMEXEC
64976 + if (vma->vm_mirror) {
64977 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
64978 + vma->vm_mirror->vm_mirror = NULL;
64979 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
64980 + vma->vm_mirror = NULL;
64981 + }
64982 +#endif
64983 +
64984 rb_erase(&vma->vm_rb, &mm->mm_rb);
64985 mm->map_count--;
64986 tail_vma = vma;
64987 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
64988 struct mempolicy *pol;
64989 struct vm_area_struct *new;
64990
64991 +#ifdef CONFIG_PAX_SEGMEXEC
64992 + struct vm_area_struct *vma_m, *new_m = NULL;
64993 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
64994 +#endif
64995 +
64996 if (is_vm_hugetlb_page(vma) && (addr &
64997 ~(huge_page_mask(hstate_vma(vma)))))
64998 return -EINVAL;
64999
65000 +#ifdef CONFIG_PAX_SEGMEXEC
65001 + vma_m = pax_find_mirror_vma(vma);
65002 +
65003 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65004 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65005 + if (mm->map_count >= sysctl_max_map_count-1)
65006 + return -ENOMEM;
65007 + } else
65008 +#endif
65009 +
65010 if (mm->map_count >= sysctl_max_map_count)
65011 return -ENOMEM;
65012
65013 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65014 if (!new)
65015 return -ENOMEM;
65016
65017 +#ifdef CONFIG_PAX_SEGMEXEC
65018 + if (vma_m) {
65019 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65020 + if (!new_m) {
65021 + kmem_cache_free(vm_area_cachep, new);
65022 + return -ENOMEM;
65023 + }
65024 + }
65025 +#endif
65026 +
65027 /* most fields are the same, copy all, and then fixup */
65028 *new = *vma;
65029
65030 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65031 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65032 }
65033
65034 +#ifdef CONFIG_PAX_SEGMEXEC
65035 + if (vma_m) {
65036 + *new_m = *vma_m;
65037 + new_m->vm_mirror = new;
65038 + new->vm_mirror = new_m;
65039 +
65040 + if (new_below)
65041 + new_m->vm_end = addr_m;
65042 + else {
65043 + new_m->vm_start = addr_m;
65044 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65045 + }
65046 + }
65047 +#endif
65048 +
65049 pol = mpol_dup(vma_policy(vma));
65050 if (IS_ERR(pol)) {
65051 +
65052 +#ifdef CONFIG_PAX_SEGMEXEC
65053 + if (new_m)
65054 + kmem_cache_free(vm_area_cachep, new_m);
65055 +#endif
65056 +
65057 kmem_cache_free(vm_area_cachep, new);
65058 return PTR_ERR(pol);
65059 }
65060 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65061 else
65062 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65063
65064 +#ifdef CONFIG_PAX_SEGMEXEC
65065 + if (vma_m) {
65066 + mpol_get(pol);
65067 + vma_set_policy(new_m, pol);
65068 +
65069 + if (new_m->vm_file) {
65070 + get_file(new_m->vm_file);
65071 + if (vma_m->vm_flags & VM_EXECUTABLE)
65072 + added_exe_file_vma(mm);
65073 + }
65074 +
65075 + if (new_m->vm_ops && new_m->vm_ops->open)
65076 + new_m->vm_ops->open(new_m);
65077 +
65078 + if (new_below)
65079 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65080 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65081 + else
65082 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65083 + }
65084 +#endif
65085 +
65086 return 0;
65087 }
65088
65089 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65090 * work. This now handles partial unmappings.
65091 * Jeremy Fitzhardinge <jeremy@goop.org>
65092 */
65093 +#ifdef CONFIG_PAX_SEGMEXEC
65094 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65095 +{
65096 + int ret = __do_munmap(mm, start, len);
65097 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65098 + return ret;
65099 +
65100 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65101 +}
65102 +
65103 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65104 +#else
65105 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65106 +#endif
65107 {
65108 unsigned long end;
65109 struct vm_area_struct *vma, *prev, *last;
65110
65111 + /*
65112 + * mm->mmap_sem is required to protect against another thread
65113 + * changing the mappings in case we sleep.
65114 + */
65115 + verify_mm_writelocked(mm);
65116 +
65117 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65118 return -EINVAL;
65119
65120 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65121 /* Fix up all other VM information */
65122 remove_vma_list(mm, vma);
65123
65124 + track_exec_limit(mm, start, end, 0UL);
65125 +
65126 return 0;
65127 }
65128
65129 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65130
65131 profile_munmap(addr);
65132
65133 +#ifdef CONFIG_PAX_SEGMEXEC
65134 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65135 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65136 + return -EINVAL;
65137 +#endif
65138 +
65139 down_write(&mm->mmap_sem);
65140 ret = do_munmap(mm, addr, len);
65141 up_write(&mm->mmap_sem);
65142 return ret;
65143 }
65144
65145 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65146 -{
65147 -#ifdef CONFIG_DEBUG_VM
65148 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65149 - WARN_ON(1);
65150 - up_read(&mm->mmap_sem);
65151 - }
65152 -#endif
65153 -}
65154 -
65155 /*
65156 * this is really a simplified "do_mmap". it only handles
65157 * anonymous maps. eventually we may be able to do some
65158 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65159 struct rb_node ** rb_link, * rb_parent;
65160 pgoff_t pgoff = addr >> PAGE_SHIFT;
65161 int error;
65162 + unsigned long charged;
65163
65164 len = PAGE_ALIGN(len);
65165 if (!len)
65166 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65167
65168 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65169
65170 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65171 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65172 + flags &= ~VM_EXEC;
65173 +
65174 +#ifdef CONFIG_PAX_MPROTECT
65175 + if (mm->pax_flags & MF_PAX_MPROTECT)
65176 + flags &= ~VM_MAYEXEC;
65177 +#endif
65178 +
65179 + }
65180 +#endif
65181 +
65182 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65183 if (error & ~PAGE_MASK)
65184 return error;
65185
65186 + charged = len >> PAGE_SHIFT;
65187 +
65188 /*
65189 * mlock MCL_FUTURE?
65190 */
65191 if (mm->def_flags & VM_LOCKED) {
65192 unsigned long locked, lock_limit;
65193 - locked = len >> PAGE_SHIFT;
65194 + locked = charged;
65195 locked += mm->locked_vm;
65196 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65197 lock_limit >>= PAGE_SHIFT;
65198 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65199 /*
65200 * Clear old maps. this also does some error checking for us
65201 */
65202 - munmap_back:
65203 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65204 if (vma && vma->vm_start < addr + len) {
65205 if (do_munmap(mm, addr, len))
65206 return -ENOMEM;
65207 - goto munmap_back;
65208 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65209 + BUG_ON(vma && vma->vm_start < addr + len);
65210 }
65211
65212 /* Check against address space limits *after* clearing old maps... */
65213 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65214 + if (!may_expand_vm(mm, charged))
65215 return -ENOMEM;
65216
65217 if (mm->map_count > sysctl_max_map_count)
65218 return -ENOMEM;
65219
65220 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65221 + if (security_vm_enough_memory(charged))
65222 return -ENOMEM;
65223
65224 /* Can we just expand an old private anonymous mapping? */
65225 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65226 */
65227 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65228 if (!vma) {
65229 - vm_unacct_memory(len >> PAGE_SHIFT);
65230 + vm_unacct_memory(charged);
65231 return -ENOMEM;
65232 }
65233
65234 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65235 vma->vm_page_prot = vm_get_page_prot(flags);
65236 vma_link(mm, vma, prev, rb_link, rb_parent);
65237 out:
65238 - mm->total_vm += len >> PAGE_SHIFT;
65239 + mm->total_vm += charged;
65240 if (flags & VM_LOCKED) {
65241 if (!mlock_vma_pages_range(vma, addr, addr + len))
65242 - mm->locked_vm += (len >> PAGE_SHIFT);
65243 + mm->locked_vm += charged;
65244 }
65245 + track_exec_limit(mm, addr, addr + len, flags);
65246 return addr;
65247 }
65248
65249 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65250 * Walk the list again, actually closing and freeing it,
65251 * with preemption enabled, without holding any MM locks.
65252 */
65253 - while (vma)
65254 + while (vma) {
65255 + vma->vm_mirror = NULL;
65256 vma = remove_vma(vma);
65257 + }
65258
65259 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65260 }
65261 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65262 struct vm_area_struct * __vma, * prev;
65263 struct rb_node ** rb_link, * rb_parent;
65264
65265 +#ifdef CONFIG_PAX_SEGMEXEC
65266 + struct vm_area_struct *vma_m = NULL;
65267 +#endif
65268 +
65269 /*
65270 * The vm_pgoff of a purely anonymous vma should be irrelevant
65271 * until its first write fault, when page's anon_vma and index
65272 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65273 if ((vma->vm_flags & VM_ACCOUNT) &&
65274 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65275 return -ENOMEM;
65276 +
65277 +#ifdef CONFIG_PAX_SEGMEXEC
65278 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65279 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65280 + if (!vma_m)
65281 + return -ENOMEM;
65282 + }
65283 +#endif
65284 +
65285 vma_link(mm, vma, prev, rb_link, rb_parent);
65286 +
65287 +#ifdef CONFIG_PAX_SEGMEXEC
65288 + if (vma_m)
65289 + pax_mirror_vma(vma_m, vma);
65290 +#endif
65291 +
65292 return 0;
65293 }
65294
65295 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65296 struct rb_node **rb_link, *rb_parent;
65297 struct mempolicy *pol;
65298
65299 + BUG_ON(vma->vm_mirror);
65300 +
65301 /*
65302 * If anonymous vma has not yet been faulted, update new pgoff
65303 * to match new location, to increase its chance of merging.
65304 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65305 return new_vma;
65306 }
65307
65308 +#ifdef CONFIG_PAX_SEGMEXEC
65309 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65310 +{
65311 + struct vm_area_struct *prev_m;
65312 + struct rb_node **rb_link_m, *rb_parent_m;
65313 + struct mempolicy *pol_m;
65314 +
65315 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65316 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65317 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65318 + *vma_m = *vma;
65319 + pol_m = vma_policy(vma_m);
65320 + mpol_get(pol_m);
65321 + vma_set_policy(vma_m, pol_m);
65322 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65323 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65324 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65325 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65326 + if (vma_m->vm_file)
65327 + get_file(vma_m->vm_file);
65328 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65329 + vma_m->vm_ops->open(vma_m);
65330 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65331 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65332 + vma_m->vm_mirror = vma;
65333 + vma->vm_mirror = vma_m;
65334 +}
65335 +#endif
65336 +
65337 /*
65338 * Return true if the calling process may expand its vm space by the passed
65339 * number of pages
65340 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65341 unsigned long lim;
65342
65343 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65344 -
65345 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65346 if (cur + npages > lim)
65347 return 0;
65348 return 1;
65349 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65350 vma->vm_start = addr;
65351 vma->vm_end = addr + len;
65352
65353 +#ifdef CONFIG_PAX_MPROTECT
65354 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65355 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65356 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65357 + return -EPERM;
65358 + if (!(vm_flags & VM_EXEC))
65359 + vm_flags &= ~VM_MAYEXEC;
65360 +#else
65361 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65362 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65363 +#endif
65364 + else
65365 + vm_flags &= ~VM_MAYWRITE;
65366 + }
65367 +#endif
65368 +
65369 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65370 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65371
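The check_heap_stack_gap()/skip_heap_stack_gap() helpers introduced in the mm/mmap.c hunks above enforce a guard gap between a candidate mapping and any neighbouring stack VMA before an address is handed out. A minimal standalone sketch of that arithmetic follows; it is not part of the patch, and the vma_t type, the gap_ok() name and the 64-page gap are illustrative assumptions rather than kernel definitions.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    unsigned long vm_start;
    unsigned long vm_end;
    bool grows_down;                 /* stands in for VM_GROWSDOWN */
} vma_t;

static const unsigned long heap_stack_gap = 64UL << 12;   /* 64 pages, cf. sysctl_heap_stack_gap */

/* true if [addr, addr+len) fits below *next with enough room left as a gap */
static bool gap_ok(const vma_t *next, unsigned long addr, unsigned long len)
{
    if (!next)
        return true;                               /* nothing above the range */
    if (addr + len > next->vm_start)
        return false;                              /* plain overlap */
    if (next->grows_down)                          /* keep a guard gap below stacks */
        return heap_stack_gap <= next->vm_start - addr - len;
    return true;
}

int main(void)
{
    vma_t stack = { 0x7ffff0000000UL, 0x7ffff0021000UL, true };

    printf("%d\n", gap_ok(&stack, 0x7fffef000000UL, 0x1000));   /* 1: well below the stack */
    printf("%d\n", gap_ok(&stack, 0x7fffeffff000UL, 0x1000));   /* 0: lands inside the gap */
    return 0;
}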
65372 diff -urNp linux-2.6.32.41/mm/mprotect.c linux-2.6.32.41/mm/mprotect.c
65373 --- linux-2.6.32.41/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65374 +++ linux-2.6.32.41/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65375 @@ -24,10 +24,16 @@
65376 #include <linux/mmu_notifier.h>
65377 #include <linux/migrate.h>
65378 #include <linux/perf_event.h>
65379 +
65380 +#ifdef CONFIG_PAX_MPROTECT
65381 +#include <linux/elf.h>
65382 +#endif
65383 +
65384 #include <asm/uaccess.h>
65385 #include <asm/pgtable.h>
65386 #include <asm/cacheflush.h>
65387 #include <asm/tlbflush.h>
65388 +#include <asm/mmu_context.h>
65389
65390 #ifndef pgprot_modify
65391 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65392 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65393 flush_tlb_range(vma, start, end);
65394 }
65395
65396 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65397 +/* called while holding the mmap semaphore for writing, except for stack expansion */
65398 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65399 +{
65400 + unsigned long oldlimit, newlimit = 0UL;
65401 +
65402 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65403 + return;
65404 +
65405 + spin_lock(&mm->page_table_lock);
65406 + oldlimit = mm->context.user_cs_limit;
65407 + if ((prot & VM_EXEC) && oldlimit < end)
65408 + /* USER_CS limit moved up */
65409 + newlimit = end;
65410 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65411 + /* USER_CS limit moved down */
65412 + newlimit = start;
65413 +
65414 + if (newlimit) {
65415 + mm->context.user_cs_limit = newlimit;
65416 +
65417 +#ifdef CONFIG_SMP
65418 + wmb();
65419 + cpus_clear(mm->context.cpu_user_cs_mask);
65420 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65421 +#endif
65422 +
65423 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65424 + }
65425 + spin_unlock(&mm->page_table_lock);
65426 + if (newlimit == end) {
65427 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65428 +
65429 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65430 + if (is_vm_hugetlb_page(vma))
65431 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65432 + else
65433 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65434 + }
65435 +}
65436 +#endif
65437 +
65438 int
65439 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65440 unsigned long start, unsigned long end, unsigned long newflags)
65441 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65442 int error;
65443 int dirty_accountable = 0;
65444
65445 +#ifdef CONFIG_PAX_SEGMEXEC
65446 + struct vm_area_struct *vma_m = NULL;
65447 + unsigned long start_m, end_m;
65448 +
65449 + start_m = start + SEGMEXEC_TASK_SIZE;
65450 + end_m = end + SEGMEXEC_TASK_SIZE;
65451 +#endif
65452 +
65453 if (newflags == oldflags) {
65454 *pprev = vma;
65455 return 0;
65456 }
65457
65458 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65459 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65460 +
65461 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65462 + return -ENOMEM;
65463 +
65464 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65465 + return -ENOMEM;
65466 + }
65467 +
65468 /*
65469 * If we make a private mapping writable we increase our commit;
65470 * but (without finer accounting) cannot reduce our commit if we
65471 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65472 }
65473 }
65474
65475 +#ifdef CONFIG_PAX_SEGMEXEC
65476 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65477 + if (start != vma->vm_start) {
65478 + error = split_vma(mm, vma, start, 1);
65479 + if (error)
65480 + goto fail;
65481 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65482 + *pprev = (*pprev)->vm_next;
65483 + }
65484 +
65485 + if (end != vma->vm_end) {
65486 + error = split_vma(mm, vma, end, 0);
65487 + if (error)
65488 + goto fail;
65489 + }
65490 +
65491 + if (pax_find_mirror_vma(vma)) {
65492 + error = __do_munmap(mm, start_m, end_m - start_m);
65493 + if (error)
65494 + goto fail;
65495 + } else {
65496 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65497 + if (!vma_m) {
65498 + error = -ENOMEM;
65499 + goto fail;
65500 + }
65501 + vma->vm_flags = newflags;
65502 + pax_mirror_vma(vma_m, vma);
65503 + }
65504 + }
65505 +#endif
65506 +
65507 /*
65508 * First try to merge with previous and/or next vma.
65509 */
65510 @@ -195,9 +293,21 @@ success:
65511 * vm_flags and vm_page_prot are protected by the mmap_sem
65512 * held in write mode.
65513 */
65514 +
65515 +#ifdef CONFIG_PAX_SEGMEXEC
65516 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65517 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65518 +#endif
65519 +
65520 vma->vm_flags = newflags;
65521 +
65522 +#ifdef CONFIG_PAX_MPROTECT
65523 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65524 + mm->binfmt->handle_mprotect(vma, newflags);
65525 +#endif
65526 +
65527 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65528 - vm_get_page_prot(newflags));
65529 + vm_get_page_prot(vma->vm_flags));
65530
65531 if (vma_wants_writenotify(vma)) {
65532 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65533 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65534 end = start + len;
65535 if (end <= start)
65536 return -ENOMEM;
65537 +
65538 +#ifdef CONFIG_PAX_SEGMEXEC
65539 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65540 + if (end > SEGMEXEC_TASK_SIZE)
65541 + return -EINVAL;
65542 + } else
65543 +#endif
65544 +
65545 + if (end > TASK_SIZE)
65546 + return -EINVAL;
65547 +
65548 if (!arch_validate_prot(prot))
65549 return -EINVAL;
65550
65551 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65552 /*
65553 * Does the application expect PROT_READ to imply PROT_EXEC:
65554 */
65555 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65556 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65557 prot |= PROT_EXEC;
65558
65559 vm_flags = calc_vm_prot_bits(prot);
65560 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65561 if (start > vma->vm_start)
65562 prev = vma;
65563
65564 +#ifdef CONFIG_PAX_MPROTECT
65565 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65566 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65567 +#endif
65568 +
65569 for (nstart = start ; ; ) {
65570 unsigned long newflags;
65571
65572 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65573
65574 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65575 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65576 + if (prot & (PROT_WRITE | PROT_EXEC))
65577 + gr_log_rwxmprotect(vma->vm_file);
65578 +
65579 + error = -EACCES;
65580 + goto out;
65581 + }
65582 +
65583 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65584 error = -EACCES;
65585 goto out;
65586 }
65587 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65588 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65589 if (error)
65590 goto out;
65591 +
65592 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65593 +
65594 nstart = tmp;
65595
65596 if (nstart < prev->vm_end)
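The sys_mprotect() hunk above hooks into the kernel's existing (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC) test: shifting the flags right by four lines the VM_MAY* bits up with VM_READ/WRITE/EXEC, so any requested permission without a matching VM_MAY* bit survives the AND and the request is denied. A standalone sketch of the bit trick; the flag values mirror the 2.6.32 include/linux/mm.h layout and everything else is illustrative.

#include <stdio.h>

#define VM_READ      0x00000001UL
#define VM_WRITE     0x00000002UL
#define VM_EXEC      0x00000004UL
#define VM_MAYREAD   0x00000010UL
#define VM_MAYWRITE  0x00000020UL
#define VM_MAYEXEC   0x00000040UL

/* nonzero if a requested VM_* permission lacks its VM_MAY* counterpart */
static int denied(unsigned long newflags)
{
    return (newflags & ~(newflags >> 4) & (VM_READ | VM_WRITE | VM_EXEC)) != 0;
}

int main(void)
{
    /* exec requested on a mapping whose VM_MAYEXEC has been cleared
       (as the PaX MPROTECT code does for writable anonymous mappings) */
    unsigned long f1 = VM_EXEC | VM_MAYREAD | VM_MAYWRITE;
    /* exec requested on a mapping that still allows it */
    unsigned long f2 = VM_EXEC | VM_MAYREAD | VM_MAYEXEC;

    printf("denied=%d\n", denied(f1));   /* denied=1 */
    printf("denied=%d\n", denied(f2));   /* denied=0 */
    return 0;
}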
65597 diff -urNp linux-2.6.32.41/mm/mremap.c linux-2.6.32.41/mm/mremap.c
65598 --- linux-2.6.32.41/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65599 +++ linux-2.6.32.41/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65600 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65601 continue;
65602 pte = ptep_clear_flush(vma, old_addr, old_pte);
65603 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65604 +
65605 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65606 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65607 + pte = pte_exprotect(pte);
65608 +#endif
65609 +
65610 set_pte_at(mm, new_addr, new_pte, pte);
65611 }
65612
65613 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65614 if (is_vm_hugetlb_page(vma))
65615 goto Einval;
65616
65617 +#ifdef CONFIG_PAX_SEGMEXEC
65618 + if (pax_find_mirror_vma(vma))
65619 + goto Einval;
65620 +#endif
65621 +
65622 /* We can't remap across vm area boundaries */
65623 if (old_len > vma->vm_end - addr)
65624 goto Efault;
65625 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65626 unsigned long ret = -EINVAL;
65627 unsigned long charged = 0;
65628 unsigned long map_flags;
65629 + unsigned long pax_task_size = TASK_SIZE;
65630
65631 if (new_addr & ~PAGE_MASK)
65632 goto out;
65633
65634 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65635 +#ifdef CONFIG_PAX_SEGMEXEC
65636 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65637 + pax_task_size = SEGMEXEC_TASK_SIZE;
65638 +#endif
65639 +
65640 + pax_task_size -= PAGE_SIZE;
65641 +
65642 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65643 goto out;
65644
65645 /* Check if the location we're moving into overlaps the
65646 * old location at all, and fail if it does.
65647 */
65648 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65649 - goto out;
65650 -
65651 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65652 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65653 goto out;
65654
65655 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65656 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65657 struct vm_area_struct *vma;
65658 unsigned long ret = -EINVAL;
65659 unsigned long charged = 0;
65660 + unsigned long pax_task_size = TASK_SIZE;
65661
65662 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65663 goto out;
65664 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65665 if (!new_len)
65666 goto out;
65667
65668 +#ifdef CONFIG_PAX_SEGMEXEC
65669 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65670 + pax_task_size = SEGMEXEC_TASK_SIZE;
65671 +#endif
65672 +
65673 + pax_task_size -= PAGE_SIZE;
65674 +
65675 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65676 + old_len > pax_task_size || addr > pax_task_size-old_len)
65677 + goto out;
65678 +
65679 if (flags & MREMAP_FIXED) {
65680 if (flags & MREMAP_MAYMOVE)
65681 ret = mremap_to(addr, old_len, new_addr, new_len);
65682 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65683 addr + new_len);
65684 }
65685 ret = addr;
65686 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65687 goto out;
65688 }
65689 }
65690 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65691 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65692 if (ret)
65693 goto out;
65694 +
65695 + map_flags = vma->vm_flags;
65696 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65697 + if (!(ret & ~PAGE_MASK)) {
65698 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65699 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65700 + }
65701 }
65702 out:
65703 if (ret & ~PAGE_MASK)
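The mremap_to() hunk above collapses the two one-sided checks against the old range into the standard interval-overlap predicate: two half-open ranges intersect exactly when each starts before the other ends. A minimal sketch with arbitrary example values, not part of the patch:

#include <stdio.h>

/* 1 if [a_start, a_start+a_len) intersects [b_start, b_start+b_len) */
static int ranges_overlap(unsigned long a_start, unsigned long a_len,
                          unsigned long b_start, unsigned long b_len)
{
    return a_start + a_len > b_start && b_start + b_len > a_start;
}

int main(void)
{
    printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2800, 0x1000));  /* 1: partial overlap */
    printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x3000, 0x1000));  /* 0: merely adjacent */
    return 0;
}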
65704 diff -urNp linux-2.6.32.41/mm/nommu.c linux-2.6.32.41/mm/nommu.c
65705 --- linux-2.6.32.41/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65706 +++ linux-2.6.32.41/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65707 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65708 int sysctl_overcommit_ratio = 50; /* default is 50% */
65709 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65710 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65711 -int heap_stack_gap = 0;
65712
65713 atomic_long_t mmap_pages_allocated;
65714
65715 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65716 EXPORT_SYMBOL(find_vma);
65717
65718 /*
65719 - * find a VMA
65720 - * - we don't extend stack VMAs under NOMMU conditions
65721 - */
65722 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65723 -{
65724 - return find_vma(mm, addr);
65725 -}
65726 -
65727 -/*
65728 * expand a stack to a given address
65729 * - not supported under NOMMU conditions
65730 */
65731 diff -urNp linux-2.6.32.41/mm/page_alloc.c linux-2.6.32.41/mm/page_alloc.c
65732 --- linux-2.6.32.41/mm/page_alloc.c 2011-03-27 14:31:47.000000000 -0400
65733 +++ linux-2.6.32.41/mm/page_alloc.c 2011-05-16 21:46:57.000000000 -0400
65734 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65735 int bad = 0;
65736 int wasMlocked = __TestClearPageMlocked(page);
65737
65738 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65739 + unsigned long index = 1UL << order;
65740 +#endif
65741 +
65742 kmemcheck_free_shadow(page, order);
65743
65744 for (i = 0 ; i < (1 << order) ; ++i)
65745 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65746 debug_check_no_obj_freed(page_address(page),
65747 PAGE_SIZE << order);
65748 }
65749 +
65750 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65751 + for (; index; --index)
65752 + sanitize_highpage(page + index - 1);
65753 +#endif
65754 +
65755 arch_free_page(page, order);
65756 kernel_map_pages(page, 1 << order, 0);
65757
65758 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65759 arch_alloc_page(page, order);
65760 kernel_map_pages(page, 1 << order, 1);
65761
65762 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
65763 if (gfp_flags & __GFP_ZERO)
65764 prep_zero_page(page, order, gfp_flags);
65765 +#endif
65766
65767 if (order && (gfp_flags & __GFP_COMP))
65768 prep_compound_page(page, order);
65769 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65770 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65771 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65772 }
65773 +
65774 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65775 + sanitize_highpage(page);
65776 +#endif
65777 +
65778 arch_free_page(page, 0);
65779 kernel_map_pages(page, 1, 0);
65780
65781 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
65782 int cpu;
65783 struct zone *zone;
65784
65785 + pax_track_stack();
65786 +
65787 for_each_populated_zone(zone) {
65788 show_node(zone);
65789 printk("%s per-cpu:\n", zone->name);
65790 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65791 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65792 }
65793 #else
65794 -static void inline setup_usemap(struct pglist_data *pgdat,
65795 +static inline void setup_usemap(struct pglist_data *pgdat,
65796 struct zone *zone, unsigned long zonesize) {}
65797 #endif /* CONFIG_SPARSEMEM */
65798
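The PAX_MEMORY_SANITIZE hunks above wipe every page of an order-N block as it is freed and, correspondingly, skip the now-redundant __GFP_ZERO clearing at allocation time. A standalone model of the index walk; the flat arena buffer and wipe_page() stand in for struct page and sanitize_highpage() and are purely illustrative.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned char arena[8 * PAGE_SIZE];

static void wipe_page(unsigned long pfn)
{
    memset(arena + pfn * PAGE_SIZE, 0, PAGE_SIZE);
}

static void free_pages_sanitized(unsigned long first_pfn, unsigned int order)
{
    unsigned long index = 1UL << order;

    /* walk the block back to front, as the freeing loop above does */
    for (; index; --index)
        wipe_page(first_pfn + index - 1);
}

int main(void)
{
    memset(arena, 0xAA, sizeof(arena));
    free_pages_sanitized(2, 2);                  /* wipes pages 2..5 */
    printf("%d %d\n", arena[2 * PAGE_SIZE], arena[6 * PAGE_SIZE]);  /* 0 170 */
    return 0;
}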
65799 diff -urNp linux-2.6.32.41/mm/percpu.c linux-2.6.32.41/mm/percpu.c
65800 --- linux-2.6.32.41/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65801 +++ linux-2.6.32.41/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65802 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65803 static unsigned int pcpu_last_unit_cpu __read_mostly;
65804
65805 /* the address of the first chunk which starts with the kernel static area */
65806 -void *pcpu_base_addr __read_mostly;
65807 +void *pcpu_base_addr __read_only;
65808 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65809
65810 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65811 diff -urNp linux-2.6.32.41/mm/rmap.c linux-2.6.32.41/mm/rmap.c
65812 --- linux-2.6.32.41/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65813 +++ linux-2.6.32.41/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65814 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65815 /* page_table_lock to protect against threads */
65816 spin_lock(&mm->page_table_lock);
65817 if (likely(!vma->anon_vma)) {
65818 +
65819 +#ifdef CONFIG_PAX_SEGMEXEC
65820 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65821 +
65822 + if (vma_m) {
65823 + BUG_ON(vma_m->anon_vma);
65824 + vma_m->anon_vma = anon_vma;
65825 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65826 + }
65827 +#endif
65828 +
65829 vma->anon_vma = anon_vma;
65830 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65831 allocated = NULL;
65832 diff -urNp linux-2.6.32.41/mm/shmem.c linux-2.6.32.41/mm/shmem.c
65833 --- linux-2.6.32.41/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65834 +++ linux-2.6.32.41/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65835 @@ -31,7 +31,7 @@
65836 #include <linux/swap.h>
65837 #include <linux/ima.h>
65838
65839 -static struct vfsmount *shm_mnt;
65840 +struct vfsmount *shm_mnt;
65841
65842 #ifdef CONFIG_SHMEM
65843 /*
65844 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65845 goto unlock;
65846 }
65847 entry = shmem_swp_entry(info, index, NULL);
65848 + if (!entry)
65849 + goto unlock;
65850 if (entry->val) {
65851 /*
65852 * The more uptodate page coming down from a stacked
65853 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65854 struct vm_area_struct pvma;
65855 struct page *page;
65856
65857 + pax_track_stack();
65858 +
65859 spol = mpol_cond_copy(&mpol,
65860 mpol_shared_policy_lookup(&info->policy, idx));
65861
65862 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65863
65864 info = SHMEM_I(inode);
65865 inode->i_size = len-1;
65866 - if (len <= (char *)inode - (char *)info) {
65867 + if (len <= (char *)inode - (char *)info && len <= 64) {
65868 /* do it inline */
65869 memcpy(info, symname, len);
65870 inode->i_op = &shmem_symlink_inline_operations;
65871 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65872 int err = -ENOMEM;
65873
65874 /* Round up to L1_CACHE_BYTES to resist false sharing */
65875 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65876 - L1_CACHE_BYTES), GFP_KERNEL);
65877 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65878 if (!sbinfo)
65879 return -ENOMEM;
65880
65881 diff -urNp linux-2.6.32.41/mm/slab.c linux-2.6.32.41/mm/slab.c
65882 --- linux-2.6.32.41/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
65883 +++ linux-2.6.32.41/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
65884 @@ -174,7 +174,7 @@
65885
65886 /* Legal flag mask for kmem_cache_create(). */
65887 #if DEBUG
65888 -# define CREATE_MASK (SLAB_RED_ZONE | \
65889 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
65890 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
65891 SLAB_CACHE_DMA | \
65892 SLAB_STORE_USER | \
65893 @@ -182,7 +182,7 @@
65894 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65895 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
65896 #else
65897 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
65898 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
65899 SLAB_CACHE_DMA | \
65900 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
65901 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65902 @@ -308,7 +308,7 @@ struct kmem_list3 {
65903 * Need this for bootstrapping a per node allocator.
65904 */
65905 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
65906 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
65907 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
65908 #define CACHE_CACHE 0
65909 #define SIZE_AC MAX_NUMNODES
65910 #define SIZE_L3 (2 * MAX_NUMNODES)
65911 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
65912 if ((x)->max_freeable < i) \
65913 (x)->max_freeable = i; \
65914 } while (0)
65915 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
65916 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
65917 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
65918 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
65919 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
65920 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
65921 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
65922 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
65923 #else
65924 #define STATS_INC_ACTIVE(x) do { } while (0)
65925 #define STATS_DEC_ACTIVE(x) do { } while (0)
65926 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
65927 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
65928 */
65929 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
65930 - const struct slab *slab, void *obj)
65931 + const struct slab *slab, const void *obj)
65932 {
65933 u32 offset = (obj - slab->s_mem);
65934 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
65935 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
65936 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
65937 sizes[INDEX_AC].cs_size,
65938 ARCH_KMALLOC_MINALIGN,
65939 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65940 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65941 NULL);
65942
65943 if (INDEX_AC != INDEX_L3) {
65944 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
65945 kmem_cache_create(names[INDEX_L3].name,
65946 sizes[INDEX_L3].cs_size,
65947 ARCH_KMALLOC_MINALIGN,
65948 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65949 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65950 NULL);
65951 }
65952
65953 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
65954 sizes->cs_cachep = kmem_cache_create(names->name,
65955 sizes->cs_size,
65956 ARCH_KMALLOC_MINALIGN,
65957 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65958 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65959 NULL);
65960 }
65961 #ifdef CONFIG_ZONE_DMA
65962 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
65963 }
65964 /* cpu stats */
65965 {
65966 - unsigned long allochit = atomic_read(&cachep->allochit);
65967 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
65968 - unsigned long freehit = atomic_read(&cachep->freehit);
65969 - unsigned long freemiss = atomic_read(&cachep->freemiss);
65970 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
65971 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
65972 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
65973 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
65974
65975 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
65976 allochit, allocmiss, freehit, freemiss);
65977 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
65978
65979 static int __init slab_proc_init(void)
65980 {
65981 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
65982 + mode_t gr_mode = S_IRUGO;
65983 +
65984 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65985 + gr_mode = S_IRUSR;
65986 +#endif
65987 +
65988 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
65989 #ifdef CONFIG_DEBUG_SLAB_LEAK
65990 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
65991 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
65992 #endif
65993 return 0;
65994 }
65995 module_init(slab_proc_init);
65996 #endif
65997
65998 +void check_object_size(const void *ptr, unsigned long n, bool to)
65999 +{
66000 +
66001 +#ifdef CONFIG_PAX_USERCOPY
66002 + struct page *page;
66003 + struct kmem_cache *cachep = NULL;
66004 + struct slab *slabp;
66005 + unsigned int objnr;
66006 + unsigned long offset;
66007 +
66008 + if (!n)
66009 + return;
66010 +
66011 + if (ZERO_OR_NULL_PTR(ptr))
66012 + goto report;
66013 +
66014 + if (!virt_addr_valid(ptr))
66015 + return;
66016 +
66017 + page = virt_to_head_page(ptr);
66018 +
66019 + if (!PageSlab(page)) {
66020 + if (object_is_on_stack(ptr, n) == -1)
66021 + goto report;
66022 + return;
66023 + }
66024 +
66025 + cachep = page_get_cache(page);
66026 + if (!(cachep->flags & SLAB_USERCOPY))
66027 + goto report;
66028 +
66029 + slabp = page_get_slab(page);
66030 + objnr = obj_to_index(cachep, slabp, ptr);
66031 + BUG_ON(objnr >= cachep->num);
66032 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66033 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66034 + return;
66035 +
66036 +report:
66037 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66038 +#endif
66039 +
66040 +}
66041 +EXPORT_SYMBOL(check_object_size);
66042 +
66043 /**
66044 * ksize - get the actual amount of memory allocated for a given object
66045 * @objp: Pointer to the object
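The slab check_object_size() added above refuses a usercopy that would step outside the slab object containing the pointer: it recovers the object's start, computes the pointer's offset, and requires the copy length to fit in the remainder. A standalone sketch of that final bounds test; the object size, offset and copy length below are illustrative numbers, not kernel values.

#include <stdbool.h>
#include <stdio.h>

/* true if copying n bytes starting at `offset` stays inside an object of obj_size bytes */
static bool copy_within_object(unsigned long obj_size,
                               unsigned long offset, unsigned long n)
{
    return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
    /* 192-byte kmalloc-style object */
    printf("%d\n", copy_within_object(192, 128, 64));   /* 1: exactly fills the tail */
    printf("%d\n", copy_within_object(192, 160, 64));   /* 0: would spill past the object */
    return 0;
}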
66046 diff -urNp linux-2.6.32.41/mm/slob.c linux-2.6.32.41/mm/slob.c
66047 --- linux-2.6.32.41/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66048 +++ linux-2.6.32.41/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
66049 @@ -29,7 +29,7 @@
66050 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66051 * alloc_pages() directly, allocating compound pages so the page order
66052 * does not have to be separately tracked, and also stores the exact
66053 - * allocation size in page->private so that it can be used to accurately
66054 + * allocation size in slob_page->size so that it can be used to accurately
66055 * provide ksize(). These objects are detected in kfree() because slob_page()
66056 * is false for them.
66057 *
66058 @@ -58,6 +58,7 @@
66059 */
66060
66061 #include <linux/kernel.h>
66062 +#include <linux/sched.h>
66063 #include <linux/slab.h>
66064 #include <linux/mm.h>
66065 #include <linux/swap.h> /* struct reclaim_state */
66066 @@ -100,7 +101,8 @@ struct slob_page {
66067 unsigned long flags; /* mandatory */
66068 atomic_t _count; /* mandatory */
66069 slobidx_t units; /* free units left in page */
66070 - unsigned long pad[2];
66071 + unsigned long pad[1];
66072 + unsigned long size; /* size when >=PAGE_SIZE */
66073 slob_t *free; /* first free slob_t in page */
66074 struct list_head list; /* linked list of free pages */
66075 };
66076 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66077 */
66078 static inline int is_slob_page(struct slob_page *sp)
66079 {
66080 - return PageSlab((struct page *)sp);
66081 + return PageSlab((struct page *)sp) && !sp->size;
66082 }
66083
66084 static inline void set_slob_page(struct slob_page *sp)
66085 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66086
66087 static inline struct slob_page *slob_page(const void *addr)
66088 {
66089 - return (struct slob_page *)virt_to_page(addr);
66090 + return (struct slob_page *)virt_to_head_page(addr);
66091 }
66092
66093 /*
66094 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66095 /*
66096 * Return the size of a slob block.
66097 */
66098 -static slobidx_t slob_units(slob_t *s)
66099 +static slobidx_t slob_units(const slob_t *s)
66100 {
66101 if (s->units > 0)
66102 return s->units;
66103 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66104 /*
66105 * Return the next free slob block pointer after this one.
66106 */
66107 -static slob_t *slob_next(slob_t *s)
66108 +static slob_t *slob_next(const slob_t *s)
66109 {
66110 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66111 slobidx_t next;
66112 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66113 /*
66114 * Returns true if s is the last free block in its page.
66115 */
66116 -static int slob_last(slob_t *s)
66117 +static int slob_last(const slob_t *s)
66118 {
66119 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66120 }
66121 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66122 if (!page)
66123 return NULL;
66124
66125 + set_slob_page(page);
66126 return page_address(page);
66127 }
66128
66129 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66130 if (!b)
66131 return NULL;
66132 sp = slob_page(b);
66133 - set_slob_page(sp);
66134
66135 spin_lock_irqsave(&slob_lock, flags);
66136 sp->units = SLOB_UNITS(PAGE_SIZE);
66137 sp->free = b;
66138 + sp->size = 0;
66139 INIT_LIST_HEAD(&sp->list);
66140 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66141 set_slob_page_free(sp, slob_list);
66142 @@ -475,10 +478,9 @@ out:
66143 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66144 #endif
66145
66146 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66147 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66148 {
66149 - unsigned int *m;
66150 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66151 + slob_t *m;
66152 void *ret;
66153
66154 lockdep_trace_alloc(gfp);
66155 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66156
66157 if (!m)
66158 return NULL;
66159 - *m = size;
66160 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66161 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66162 + m[0].units = size;
66163 + m[1].units = align;
66164 ret = (void *)m + align;
66165
66166 trace_kmalloc_node(_RET_IP_, ret,
66167 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66168
66169 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66170 if (ret) {
66171 - struct page *page;
66172 - page = virt_to_page(ret);
66173 - page->private = size;
66174 + struct slob_page *sp;
66175 + sp = slob_page(ret);
66176 + sp->size = size;
66177 }
66178
66179 trace_kmalloc_node(_RET_IP_, ret,
66180 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66181 kmemleak_alloc(ret, size, 1, gfp);
66182 return ret;
66183 }
66184 +
66185 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66186 +{
66187 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66188 +
66189 + return __kmalloc_node_align(size, gfp, node, align);
66190 +}
66191 EXPORT_SYMBOL(__kmalloc_node);
66192
66193 void kfree(const void *block)
66194 @@ -528,13 +540,81 @@ void kfree(const void *block)
66195 sp = slob_page(block);
66196 if (is_slob_page(sp)) {
66197 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66198 - unsigned int *m = (unsigned int *)(block - align);
66199 - slob_free(m, *m + align);
66200 - } else
66201 + slob_t *m = (slob_t *)(block - align);
66202 + slob_free(m, m[0].units + align);
66203 + } else {
66204 + clear_slob_page(sp);
66205 + free_slob_page(sp);
66206 + sp->size = 0;
66207 put_page(&sp->page);
66208 + }
66209 }
66210 EXPORT_SYMBOL(kfree);
66211
66212 +void check_object_size(const void *ptr, unsigned long n, bool to)
66213 +{
66214 +
66215 +#ifdef CONFIG_PAX_USERCOPY
66216 + struct slob_page *sp;
66217 + const slob_t *free;
66218 + const void *base;
66219 +
66220 + if (!n)
66221 + return;
66222 +
66223 + if (ZERO_OR_NULL_PTR(ptr))
66224 + goto report;
66225 +
66226 + if (!virt_addr_valid(ptr))
66227 + return;
66228 +
66229 + sp = slob_page(ptr);
66230 + if (!PageSlab((struct page*)sp)) {
66231 + if (object_is_on_stack(ptr, n) == -1)
66232 + goto report;
66233 + return;
66234 + }
66235 +
66236 + if (sp->size) {
66237 + base = page_address(&sp->page);
66238 + if (base <= ptr && n <= sp->size - (ptr - base))
66239 + return;
66240 + goto report;
66241 + }
66242 +
66243 + /* some tricky double walking to find the chunk */
66244 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66245 + free = sp->free;
66246 +
66247 + while (!slob_last(free) && (void *)free <= ptr) {
66248 + base = free + slob_units(free);
66249 + free = slob_next(free);
66250 + }
66251 +
66252 + while (base < (void *)free) {
66253 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66254 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66255 + int offset;
66256 +
66257 + if (ptr < base + align)
66258 + goto report;
66259 +
66260 + offset = ptr - base - align;
66261 + if (offset < m) {
66262 + if (n <= m - offset)
66263 + return;
66264 + goto report;
66265 + }
66266 + base += size;
66267 + }
66268 +
66269 +report:
66270 + pax_report_usercopy(ptr, n, to, NULL);
66271 +#endif
66272 +
66273 +}
66274 +EXPORT_SYMBOL(check_object_size);
66275 +
66276 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66277 size_t ksize(const void *block)
66278 {
66279 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66280 sp = slob_page(block);
66281 if (is_slob_page(sp)) {
66282 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66283 - unsigned int *m = (unsigned int *)(block - align);
66284 - return SLOB_UNITS(*m) * SLOB_UNIT;
66285 + slob_t *m = (slob_t *)(block - align);
66286 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66287 } else
66288 - return sp->page.private;
66289 + return sp->size;
66290 }
66291 EXPORT_SYMBOL(ksize);
66292
66293 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66294 {
66295 void *b;
66296
66297 +#ifdef CONFIG_PAX_USERCOPY
66298 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66299 +#else
66300 if (c->size < PAGE_SIZE) {
66301 b = slob_alloc(c->size, flags, c->align, node);
66302 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66303 SLOB_UNITS(c->size) * SLOB_UNIT,
66304 flags, node);
66305 } else {
66306 + struct slob_page *sp;
66307 +
66308 b = slob_new_pages(flags, get_order(c->size), node);
66309 + sp = slob_page(b);
66310 + sp->size = c->size;
66311 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66312 PAGE_SIZE << get_order(c->size),
66313 flags, node);
66314 }
66315 +#endif
66316
66317 if (c->ctor)
66318 c->ctor(b);
66319 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66320
66321 static void __kmem_cache_free(void *b, int size)
66322 {
66323 - if (size < PAGE_SIZE)
66324 + struct slob_page *sp = slob_page(b);
66325 +
66326 + if (is_slob_page(sp))
66327 slob_free(b, size);
66328 - else
66329 + else {
66330 + clear_slob_page(sp);
66331 + free_slob_page(sp);
66332 + sp->size = 0;
66333 slob_free_pages(b, get_order(size));
66334 + }
66335 }
66336
66337 static void kmem_rcu_free(struct rcu_head *head)
66338 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66339
66340 void kmem_cache_free(struct kmem_cache *c, void *b)
66341 {
66342 + int size = c->size;
66343 +
66344 +#ifdef CONFIG_PAX_USERCOPY
66345 + if (size + c->align < PAGE_SIZE) {
66346 + size += c->align;
66347 + b -= c->align;
66348 + }
66349 +#endif
66350 +
66351 kmemleak_free_recursive(b, c->flags);
66352 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66353 struct slob_rcu *slob_rcu;
66354 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66355 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66356 INIT_RCU_HEAD(&slob_rcu->head);
66357 - slob_rcu->size = c->size;
66358 + slob_rcu->size = size;
66359 call_rcu(&slob_rcu->head, kmem_rcu_free);
66360 } else {
66361 - __kmem_cache_free(b, c->size);
66362 + __kmem_cache_free(b, size);
66363 }
66364
66365 trace_kmem_cache_free(_RET_IP_, b);
66366 diff -urNp linux-2.6.32.41/mm/slub.c linux-2.6.32.41/mm/slub.c
66367 --- linux-2.6.32.41/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66368 +++ linux-2.6.32.41/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66369 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66370 if (!t->addr)
66371 return;
66372
66373 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66374 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66375 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66376 }
66377
66378 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66379
66380 page = virt_to_head_page(x);
66381
66382 + BUG_ON(!PageSlab(page));
66383 +
66384 slab_free(s, page, x, _RET_IP_);
66385
66386 trace_kmem_cache_free(_RET_IP_, x);
66387 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66388 * Merge control. If this is set then no merging of slab caches will occur.
66389 * (Could be removed. This was introduced to pacify the merge skeptics.)
66390 */
66391 -static int slub_nomerge;
66392 +static int slub_nomerge = 1;
66393
66394 /*
66395 * Calculate the order of allocation given an slab object size.
66396 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66397 * list to avoid pounding the page allocator excessively.
66398 */
66399 set_min_partial(s, ilog2(s->size));
66400 - s->refcount = 1;
66401 + atomic_set(&s->refcount, 1);
66402 #ifdef CONFIG_NUMA
66403 s->remote_node_defrag_ratio = 1000;
66404 #endif
66405 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66406 void kmem_cache_destroy(struct kmem_cache *s)
66407 {
66408 down_write(&slub_lock);
66409 - s->refcount--;
66410 - if (!s->refcount) {
66411 + if (atomic_dec_and_test(&s->refcount)) {
66412 list_del(&s->list);
66413 up_write(&slub_lock);
66414 if (kmem_cache_close(s)) {
66415 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66416 __setup("slub_nomerge", setup_slub_nomerge);
66417
66418 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66419 - const char *name, int size, gfp_t gfp_flags)
66420 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66421 {
66422 - unsigned int flags = 0;
66423 -
66424 if (gfp_flags & SLUB_DMA)
66425 - flags = SLAB_CACHE_DMA;
66426 + flags |= SLAB_CACHE_DMA;
66427
66428 /*
66429 * This function is called with IRQs disabled during early-boot on
66430 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66431 EXPORT_SYMBOL(__kmalloc_node);
66432 #endif
66433
66434 +void check_object_size(const void *ptr, unsigned long n, bool to)
66435 +{
66436 +
66437 +#ifdef CONFIG_PAX_USERCOPY
66438 + struct page *page;
66439 + struct kmem_cache *s = NULL;
66440 + unsigned long offset;
66441 +
66442 + if (!n)
66443 + return;
66444 +
66445 + if (ZERO_OR_NULL_PTR(ptr))
66446 + goto report;
66447 +
66448 + if (!virt_addr_valid(ptr))
66449 + return;
66450 +
66451 + page = get_object_page(ptr);
66452 +
66453 + if (!page) {
66454 + if (object_is_on_stack(ptr, n) == -1)
66455 + goto report;
66456 + return;
66457 + }
66458 +
66459 + s = page->slab;
66460 + if (!(s->flags & SLAB_USERCOPY))
66461 + goto report;
66462 +
66463 + offset = (ptr - page_address(page)) % s->size;
66464 + if (offset <= s->objsize && n <= s->objsize - offset)
66465 + return;
66466 +
66467 +report:
66468 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66469 +#endif
66470 +
66471 +}
66472 +EXPORT_SYMBOL(check_object_size);
66473 +
66474 size_t ksize(const void *object)
66475 {
66476 struct page *page;
66477 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66478 * kmem_cache_open for slab_state == DOWN.
66479 */
66480 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66481 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66482 - kmalloc_caches[0].refcount = -1;
66483 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66484 + atomic_set(&kmalloc_caches[0].refcount, -1);
66485 caches++;
66486
66487 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66488 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66489 /* Caches that are not of the two-to-the-power-of size */
66490 if (KMALLOC_MIN_SIZE <= 32) {
66491 create_kmalloc_cache(&kmalloc_caches[1],
66492 - "kmalloc-96", 96, GFP_NOWAIT);
66493 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66494 caches++;
66495 }
66496 if (KMALLOC_MIN_SIZE <= 64) {
66497 create_kmalloc_cache(&kmalloc_caches[2],
66498 - "kmalloc-192", 192, GFP_NOWAIT);
66499 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66500 caches++;
66501 }
66502
66503 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66504 create_kmalloc_cache(&kmalloc_caches[i],
66505 - "kmalloc", 1 << i, GFP_NOWAIT);
66506 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66507 caches++;
66508 }
66509
66510 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66511 /*
66512 * We may have set a slab to be unmergeable during bootstrap.
66513 */
66514 - if (s->refcount < 0)
66515 + if (atomic_read(&s->refcount) < 0)
66516 return 1;
66517
66518 return 0;
66519 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66520 if (s) {
66521 int cpu;
66522
66523 - s->refcount++;
66524 + atomic_inc(&s->refcount);
66525 /*
66526 * Adjust the object sizes so that we clear
66527 * the complete object on kzalloc.
66528 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66529
66530 if (sysfs_slab_alias(s, name)) {
66531 down_write(&slub_lock);
66532 - s->refcount--;
66533 + atomic_dec(&s->refcount);
66534 up_write(&slub_lock);
66535 goto err;
66536 }
66537 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66538
66539 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66540 {
66541 - return sprintf(buf, "%d\n", s->refcount - 1);
66542 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66543 }
66544 SLAB_ATTR_RO(aliases);
66545
66546 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66547 kfree(s);
66548 }
66549
66550 -static struct sysfs_ops slab_sysfs_ops = {
66551 +static const struct sysfs_ops slab_sysfs_ops = {
66552 .show = slab_attr_show,
66553 .store = slab_attr_store,
66554 };
66555 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66556 return 0;
66557 }
66558
66559 -static struct kset_uevent_ops slab_uevent_ops = {
66560 +static const struct kset_uevent_ops slab_uevent_ops = {
66561 .filter = uevent_filter,
66562 };
66563
66564 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66565
66566 static int __init slab_proc_init(void)
66567 {
66568 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66569 + mode_t gr_mode = S_IRUGO;
66570 +
66571 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66572 + gr_mode = S_IRUSR;
66573 +#endif
66574 +
66575 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66576 return 0;
66577 }
66578 module_init(slab_proc_init);
66579 diff -urNp linux-2.6.32.41/mm/util.c linux-2.6.32.41/mm/util.c
66580 --- linux-2.6.32.41/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66581 +++ linux-2.6.32.41/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66582 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66583 void arch_pick_mmap_layout(struct mm_struct *mm)
66584 {
66585 mm->mmap_base = TASK_UNMAPPED_BASE;
66586 +
66587 +#ifdef CONFIG_PAX_RANDMMAP
66588 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66589 + mm->mmap_base += mm->delta_mmap;
66590 +#endif
66591 +
66592 mm->get_unmapped_area = arch_get_unmapped_area;
66593 mm->unmap_area = arch_unmap_area;
66594 }
66595 diff -urNp linux-2.6.32.41/mm/vmalloc.c linux-2.6.32.41/mm/vmalloc.c
66596 --- linux-2.6.32.41/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66597 +++ linux-2.6.32.41/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66598 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66599
66600 pte = pte_offset_kernel(pmd, addr);
66601 do {
66602 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66603 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66604 +
66605 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66606 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66607 + BUG_ON(!pte_exec(*pte));
66608 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66609 + continue;
66610 + }
66611 +#endif
66612 +
66613 + {
66614 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66615 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66616 + }
66617 } while (pte++, addr += PAGE_SIZE, addr != end);
66618 }
66619
66620 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66621 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66622 {
66623 pte_t *pte;
66624 + int ret = -ENOMEM;
66625
66626 /*
66627 * nr is a running index into the array which helps higher level
66628 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66629 pte = pte_alloc_kernel(pmd, addr);
66630 if (!pte)
66631 return -ENOMEM;
66632 +
66633 + pax_open_kernel();
66634 do {
66635 struct page *page = pages[*nr];
66636
66637 - if (WARN_ON(!pte_none(*pte)))
66638 - return -EBUSY;
66639 - if (WARN_ON(!page))
66640 - return -ENOMEM;
66641 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66642 + if (!(pgprot_val(prot) & _PAGE_NX))
66643 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66644 + else
66645 +#endif
66646 +
66647 + if (WARN_ON(!pte_none(*pte))) {
66648 + ret = -EBUSY;
66649 + goto out;
66650 + }
66651 + if (WARN_ON(!page)) {
66652 + ret = -ENOMEM;
66653 + goto out;
66654 + }
66655 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66656 (*nr)++;
66657 } while (pte++, addr += PAGE_SIZE, addr != end);
66658 - return 0;
66659 + ret = 0;
66660 +out:
66661 + pax_close_kernel();
66662 + return ret;
66663 }
66664
66665 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66666 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66667 * and fall back on vmalloc() if that fails. Others
66668 * just put it in the vmalloc space.
66669 */
66670 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66671 +#ifdef CONFIG_MODULES
66672 +#ifdef MODULES_VADDR
66673 unsigned long addr = (unsigned long)x;
66674 if (addr >= MODULES_VADDR && addr < MODULES_END)
66675 return 1;
66676 #endif
66677 +
66678 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66679 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66680 + return 1;
66681 +#endif
66682 +
66683 +#endif
66684 +
66685 return is_vmalloc_addr(x);
66686 }
66687
66688 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66689
66690 if (!pgd_none(*pgd)) {
66691 pud_t *pud = pud_offset(pgd, addr);
66692 +#ifdef CONFIG_X86
66693 + if (!pud_large(*pud))
66694 +#endif
66695 if (!pud_none(*pud)) {
66696 pmd_t *pmd = pmd_offset(pud, addr);
66697 +#ifdef CONFIG_X86
66698 + if (!pmd_large(*pmd))
66699 +#endif
66700 if (!pmd_none(*pmd)) {
66701 pte_t *ptep, pte;
66702
66703 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66704 struct rb_node *tmp;
66705
66706 while (*p) {
66707 - struct vmap_area *tmp;
66708 + struct vmap_area *varea;
66709
66710 parent = *p;
66711 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66712 - if (va->va_start < tmp->va_end)
66713 + varea = rb_entry(parent, struct vmap_area, rb_node);
66714 + if (va->va_start < varea->va_end)
66715 p = &(*p)->rb_left;
66716 - else if (va->va_end > tmp->va_start)
66717 + else if (va->va_end > varea->va_start)
66718 p = &(*p)->rb_right;
66719 else
66720 BUG();
66721 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66722 struct vm_struct *area;
66723
66724 BUG_ON(in_interrupt());
66725 +
66726 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66727 + if (flags & VM_KERNEXEC) {
66728 + if (start != VMALLOC_START || end != VMALLOC_END)
66729 + return NULL;
66730 + start = (unsigned long)MODULES_EXEC_VADDR;
66731 + end = (unsigned long)MODULES_EXEC_END;
66732 + }
66733 +#endif
66734 +
66735 if (flags & VM_IOREMAP) {
66736 int bit = fls(size);
66737
66738 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66739 if (count > totalram_pages)
66740 return NULL;
66741
66742 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66743 + if (!(pgprot_val(prot) & _PAGE_NX))
66744 + flags |= VM_KERNEXEC;
66745 +#endif
66746 +
66747 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66748 __builtin_return_address(0));
66749 if (!area)
66750 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66751 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66752 return NULL;
66753
66754 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66755 + if (!(pgprot_val(prot) & _PAGE_NX))
66756 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66757 + node, gfp_mask, caller);
66758 + else
66759 +#endif
66760 +
66761 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66762 VMALLOC_END, node, gfp_mask, caller);
66763
66764 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66765 return addr;
66766 }
66767
66768 +#undef __vmalloc
66769 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66770 {
66771 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66772 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66773 * For tight control over page level allocator and protection flags
66774 * use __vmalloc() instead.
66775 */
66776 +#undef vmalloc
66777 void *vmalloc(unsigned long size)
66778 {
66779 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66780 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66781 * The resulting memory area is zeroed so it can be mapped to userspace
66782 * without leaking data.
66783 */
66784 +#undef vmalloc_user
66785 void *vmalloc_user(unsigned long size)
66786 {
66787 struct vm_struct *area;
66788 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66789 * For tight control over page level allocator and protection flags
66790 * use __vmalloc() instead.
66791 */
66792 +#undef vmalloc_node
66793 void *vmalloc_node(unsigned long size, int node)
66794 {
66795 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66796 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66797 * For tight control over page level allocator and protection flags
66798 * use __vmalloc() instead.
66799 */
66800 -
66801 +#undef vmalloc_exec
66802 void *vmalloc_exec(unsigned long size)
66803 {
66804 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66805 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66806 -1, __builtin_return_address(0));
66807 }
66808
66809 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66810 * Allocate enough 32bit PA addressable pages to cover @size from the
66811 * page level allocator and map them into contiguous kernel virtual space.
66812 */
66813 +#undef vmalloc_32
66814 void *vmalloc_32(unsigned long size)
66815 {
66816 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66817 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66818 * The resulting memory area is 32bit addressable and zeroed so it can be
66819 * mapped to userspace without leaking data.
66820 */
66821 +#undef vmalloc_32_user
66822 void *vmalloc_32_user(unsigned long size)
66823 {
66824 struct vm_struct *area;
66825 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66826 unsigned long uaddr = vma->vm_start;
66827 unsigned long usize = vma->vm_end - vma->vm_start;
66828
66829 + BUG_ON(vma->vm_mirror);
66830 +
66831 if ((PAGE_SIZE-1) & (unsigned long)addr)
66832 return -EINVAL;
66833
66834 diff -urNp linux-2.6.32.41/mm/vmstat.c linux-2.6.32.41/mm/vmstat.c
66835 --- linux-2.6.32.41/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66836 +++ linux-2.6.32.41/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66837 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66838 *
66839 * vm_stat contains the global counters
66840 */
66841 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66842 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66843 EXPORT_SYMBOL(vm_stat);
66844
66845 #ifdef CONFIG_SMP
66846 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66847 v = p->vm_stat_diff[i];
66848 p->vm_stat_diff[i] = 0;
66849 local_irq_restore(flags);
66850 - atomic_long_add(v, &zone->vm_stat[i]);
66851 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66852 global_diff[i] += v;
66853 #ifdef CONFIG_NUMA
66854 /* 3 seconds idle till flush */
66855 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66856
66857 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66858 if (global_diff[i])
66859 - atomic_long_add(global_diff[i], &vm_stat[i]);
66860 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66861 }
66862
66863 #endif
66864 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66865 start_cpu_timer(cpu);
66866 #endif
66867 #ifdef CONFIG_PROC_FS
66868 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66869 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66870 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66871 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66872 + {
66873 + mode_t gr_mode = S_IRUGO;
66874 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66875 + gr_mode = S_IRUSR;
66876 +#endif
66877 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66878 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
66879 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66880 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
66881 +#else
66882 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
66883 +#endif
66884 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
66885 + }
66886 #endif
66887 return 0;
66888 }
66889 diff -urNp linux-2.6.32.41/net/8021q/vlan.c linux-2.6.32.41/net/8021q/vlan.c
66890 --- linux-2.6.32.41/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
66891 +++ linux-2.6.32.41/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
66892 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
66893 err = -EPERM;
66894 if (!capable(CAP_NET_ADMIN))
66895 break;
66896 - if ((args.u.name_type >= 0) &&
66897 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
66898 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
66899 struct vlan_net *vn;
66900
66901 vn = net_generic(net, vlan_net_id);
66902 diff -urNp linux-2.6.32.41/net/atm/atm_misc.c linux-2.6.32.41/net/atm/atm_misc.c
66903 --- linux-2.6.32.41/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
66904 +++ linux-2.6.32.41/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
66905 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
66906 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
66907 return 1;
66908 atm_return(vcc,truesize);
66909 - atomic_inc(&vcc->stats->rx_drop);
66910 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66911 return 0;
66912 }
66913
66914 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
66915 }
66916 }
66917 atm_return(vcc,guess);
66918 - atomic_inc(&vcc->stats->rx_drop);
66919 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66920 return NULL;
66921 }
66922
66923 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
66924
66925 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66926 {
66927 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66928 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66929 __SONET_ITEMS
66930 #undef __HANDLE_ITEM
66931 }
66932 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
66933
66934 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66935 {
66936 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
66937 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
66938 __SONET_ITEMS
66939 #undef __HANDLE_ITEM
66940 }
66941 diff -urNp linux-2.6.32.41/net/atm/mpoa_caches.c linux-2.6.32.41/net/atm/mpoa_caches.c
66942 --- linux-2.6.32.41/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
66943 +++ linux-2.6.32.41/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
66944 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
66945 struct timeval now;
66946 struct k_message msg;
66947
66948 + pax_track_stack();
66949 +
66950 do_gettimeofday(&now);
66951
66952 write_lock_irq(&client->egress_lock);
66953 diff -urNp linux-2.6.32.41/net/atm/proc.c linux-2.6.32.41/net/atm/proc.c
66954 --- linux-2.6.32.41/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
66955 +++ linux-2.6.32.41/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
66956 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
66957 const struct k_atm_aal_stats *stats)
66958 {
66959 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
66960 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
66961 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
66962 - atomic_read(&stats->rx_drop));
66963 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
66964 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
66965 + atomic_read_unchecked(&stats->rx_drop));
66966 }
66967
66968 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
66969 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
66970 {
66971 struct sock *sk = sk_atm(vcc);
66972
66973 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66974 + seq_printf(seq, "%p ", NULL);
66975 +#else
66976 seq_printf(seq, "%p ", vcc);
66977 +#endif
66978 +
66979 if (!vcc->dev)
66980 seq_printf(seq, "Unassigned ");
66981 else
66982 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
66983 {
66984 if (!vcc->dev)
66985 seq_printf(seq, sizeof(void *) == 4 ?
66986 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66987 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
66988 +#else
66989 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
66990 +#endif
66991 else
66992 seq_printf(seq, "%3d %3d %5d ",
66993 vcc->dev->number, vcc->vpi, vcc->vci);
66994 diff -urNp linux-2.6.32.41/net/atm/resources.c linux-2.6.32.41/net/atm/resources.c
66995 --- linux-2.6.32.41/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
66996 +++ linux-2.6.32.41/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
66997 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
66998 static void copy_aal_stats(struct k_atm_aal_stats *from,
66999 struct atm_aal_stats *to)
67000 {
67001 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67002 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67003 __AAL_STAT_ITEMS
67004 #undef __HANDLE_ITEM
67005 }
67006 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67007 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67008 struct atm_aal_stats *to)
67009 {
67010 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67011 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67012 __AAL_STAT_ITEMS
67013 #undef __HANDLE_ITEM
67014 }
67015 diff -urNp linux-2.6.32.41/net/bluetooth/l2cap.c linux-2.6.32.41/net/bluetooth/l2cap.c
67016 --- linux-2.6.32.41/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67017 +++ linux-2.6.32.41/net/bluetooth/l2cap.c 2011-06-12 06:34:08.000000000 -0400
67018 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67019 err = -ENOTCONN;
67020 break;
67021 }
67022 -
67023 + memset(&cinfo, 0, sizeof(cinfo));
67024 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67025 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67026
67027 diff -urNp linux-2.6.32.41/net/bluetooth/rfcomm/sock.c linux-2.6.32.41/net/bluetooth/rfcomm/sock.c
67028 --- linux-2.6.32.41/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67029 +++ linux-2.6.32.41/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67030 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67031
67032 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67033
67034 + memset(&cinfo, 0, sizeof(cinfo));
67035 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67036 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67037
67038 diff -urNp linux-2.6.32.41/net/bridge/br_private.h linux-2.6.32.41/net/bridge/br_private.h
67039 --- linux-2.6.32.41/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67040 +++ linux-2.6.32.41/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67041 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67042
67043 #ifdef CONFIG_SYSFS
67044 /* br_sysfs_if.c */
67045 -extern struct sysfs_ops brport_sysfs_ops;
67046 +extern const struct sysfs_ops brport_sysfs_ops;
67047 extern int br_sysfs_addif(struct net_bridge_port *p);
67048
67049 /* br_sysfs_br.c */
67050 diff -urNp linux-2.6.32.41/net/bridge/br_stp_if.c linux-2.6.32.41/net/bridge/br_stp_if.c
67051 --- linux-2.6.32.41/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67052 +++ linux-2.6.32.41/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67053 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67054 char *envp[] = { NULL };
67055
67056 if (br->stp_enabled == BR_USER_STP) {
67057 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67058 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67059 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67060 br->dev->name, r);
67061
67062 diff -urNp linux-2.6.32.41/net/bridge/br_sysfs_if.c linux-2.6.32.41/net/bridge/br_sysfs_if.c
67063 --- linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67064 +++ linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67065 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67066 return ret;
67067 }
67068
67069 -struct sysfs_ops brport_sysfs_ops = {
67070 +const struct sysfs_ops brport_sysfs_ops = {
67071 .show = brport_show,
67072 .store = brport_store,
67073 };
67074 diff -urNp linux-2.6.32.41/net/bridge/netfilter/ebtables.c linux-2.6.32.41/net/bridge/netfilter/ebtables.c
67075 --- linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67076 +++ linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67077 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67078 unsigned int entries_size, nentries;
67079 char *entries;
67080
67081 + pax_track_stack();
67082 +
67083 if (cmd == EBT_SO_GET_ENTRIES) {
67084 entries_size = t->private->entries_size;
67085 nentries = t->private->nentries;
67086 diff -urNp linux-2.6.32.41/net/can/bcm.c linux-2.6.32.41/net/can/bcm.c
67087 --- linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67088 +++ linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67089 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67090 struct bcm_sock *bo = bcm_sk(sk);
67091 struct bcm_op *op;
67092
67093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67094 + seq_printf(m, ">>> socket %p", NULL);
67095 + seq_printf(m, " / sk %p", NULL);
67096 + seq_printf(m, " / bo %p", NULL);
67097 +#else
67098 seq_printf(m, ">>> socket %p", sk->sk_socket);
67099 seq_printf(m, " / sk %p", sk);
67100 seq_printf(m, " / bo %p", bo);
67101 +#endif
67102 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67103 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67104 seq_printf(m, " <<<\n");
67105 diff -urNp linux-2.6.32.41/net/core/dev.c linux-2.6.32.41/net/core/dev.c
67106 --- linux-2.6.32.41/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67107 +++ linux-2.6.32.41/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67108 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67109 if (no_module && capable(CAP_NET_ADMIN))
67110 no_module = request_module("netdev-%s", name);
67111 if (no_module && capable(CAP_SYS_MODULE)) {
67112 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67113 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67114 +#else
67115 if (!request_module("%s", name))
67116 pr_err("Loading kernel module for a network device "
67117 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67118 "instead\n", name);
67119 +#endif
67120 }
67121 }
67122 EXPORT_SYMBOL(dev_load);
67123 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67124 }
67125 EXPORT_SYMBOL(netif_rx_ni);
67126
67127 -static void net_tx_action(struct softirq_action *h)
67128 +static void net_tx_action(void)
67129 {
67130 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67131
67132 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67133 EXPORT_SYMBOL(netif_napi_del);
67134
67135
67136 -static void net_rx_action(struct softirq_action *h)
67137 +static void net_rx_action(void)
67138 {
67139 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67140 unsigned long time_limit = jiffies + 2;
67141 diff -urNp linux-2.6.32.41/net/core/flow.c linux-2.6.32.41/net/core/flow.c
67142 --- linux-2.6.32.41/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67143 +++ linux-2.6.32.41/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67144 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67145 atomic_t *object_ref;
67146 };
67147
67148 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67149 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67150
67151 static u32 flow_hash_shift;
67152 #define flow_hash_size (1 << flow_hash_shift)
67153 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67154 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67155
67156 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67157
67158 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67159 u32 hash_rnd;
67160 int count;
67161 };
67162 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67163 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67164
67165 #define flow_hash_rnd_recalc(cpu) \
67166 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67167 @@ -69,7 +69,7 @@ struct flow_flush_info {
67168 atomic_t cpuleft;
67169 struct completion completion;
67170 };
67171 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67172 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67173
67174 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67175
67176 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67177 if (fle->family == family &&
67178 fle->dir == dir &&
67179 flow_key_compare(key, &fle->key) == 0) {
67180 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67181 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67182 void *ret = fle->object;
67183
67184 if (ret)
67185 @@ -228,7 +228,7 @@ nocache:
67186 err = resolver(net, key, family, dir, &obj, &obj_ref);
67187
67188 if (fle && !err) {
67189 - fle->genid = atomic_read(&flow_cache_genid);
67190 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67191
67192 if (fle->object)
67193 atomic_dec(fle->object_ref);
67194 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67195
67196 fle = flow_table(cpu)[i];
67197 for (; fle; fle = fle->next) {
67198 - unsigned genid = atomic_read(&flow_cache_genid);
67199 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67200
67201 if (!fle->object || fle->genid == genid)
67202 continue;
67203 diff -urNp linux-2.6.32.41/net/core/skbuff.c linux-2.6.32.41/net/core/skbuff.c
67204 --- linux-2.6.32.41/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67205 +++ linux-2.6.32.41/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67206 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67207 struct sk_buff *frag_iter;
67208 struct sock *sk = skb->sk;
67209
67210 + pax_track_stack();
67211 +
67212 /*
67213 * __skb_splice_bits() only fails if the output has no room left,
67214 * so no point in going over the frag_list for the error case.
67215 diff -urNp linux-2.6.32.41/net/core/sock.c linux-2.6.32.41/net/core/sock.c
67216 --- linux-2.6.32.41/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67217 +++ linux-2.6.32.41/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67218 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67219 break;
67220
67221 case SO_PEERCRED:
67222 + {
67223 + struct ucred peercred;
67224 if (len > sizeof(sk->sk_peercred))
67225 len = sizeof(sk->sk_peercred);
67226 - if (copy_to_user(optval, &sk->sk_peercred, len))
67227 + peercred = sk->sk_peercred;
67228 + if (copy_to_user(optval, &peercred, len))
67229 return -EFAULT;
67230 goto lenout;
67231 + }
67232
67233 case SO_PEERNAME:
67234 {
67235 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67236 */
67237 smp_wmb();
67238 atomic_set(&sk->sk_refcnt, 1);
67239 - atomic_set(&sk->sk_drops, 0);
67240 + atomic_set_unchecked(&sk->sk_drops, 0);
67241 }
67242 EXPORT_SYMBOL(sock_init_data);
67243
67244 diff -urNp linux-2.6.32.41/net/decnet/sysctl_net_decnet.c linux-2.6.32.41/net/decnet/sysctl_net_decnet.c
67245 --- linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67246 +++ linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67247 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67248
67249 if (len > *lenp) len = *lenp;
67250
67251 - if (copy_to_user(buffer, addr, len))
67252 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67253 return -EFAULT;
67254
67255 *lenp = len;
67256 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67257
67258 if (len > *lenp) len = *lenp;
67259
67260 - if (copy_to_user(buffer, devname, len))
67261 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67262 return -EFAULT;
67263
67264 *lenp = len;
67265 diff -urNp linux-2.6.32.41/net/econet/Kconfig linux-2.6.32.41/net/econet/Kconfig
67266 --- linux-2.6.32.41/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67267 +++ linux-2.6.32.41/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67268 @@ -4,7 +4,7 @@
67269
67270 config ECONET
67271 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67272 - depends on EXPERIMENTAL && INET
67273 + depends on EXPERIMENTAL && INET && BROKEN
67274 ---help---
67275 Econet is a fairly old and slow networking protocol mainly used by
67276 Acorn computers to access file and print servers. It uses native
67277 diff -urNp linux-2.6.32.41/net/ieee802154/dgram.c linux-2.6.32.41/net/ieee802154/dgram.c
67278 --- linux-2.6.32.41/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67279 +++ linux-2.6.32.41/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67280 @@ -318,7 +318,7 @@ out:
67281 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67282 {
67283 if (sock_queue_rcv_skb(sk, skb) < 0) {
67284 - atomic_inc(&sk->sk_drops);
67285 + atomic_inc_unchecked(&sk->sk_drops);
67286 kfree_skb(skb);
67287 return NET_RX_DROP;
67288 }
67289 diff -urNp linux-2.6.32.41/net/ieee802154/raw.c linux-2.6.32.41/net/ieee802154/raw.c
67290 --- linux-2.6.32.41/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67291 +++ linux-2.6.32.41/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67292 @@ -206,7 +206,7 @@ out:
67293 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67294 {
67295 if (sock_queue_rcv_skb(sk, skb) < 0) {
67296 - atomic_inc(&sk->sk_drops);
67297 + atomic_inc_unchecked(&sk->sk_drops);
67298 kfree_skb(skb);
67299 return NET_RX_DROP;
67300 }
67301 diff -urNp linux-2.6.32.41/net/ipv4/inet_diag.c linux-2.6.32.41/net/ipv4/inet_diag.c
67302 --- linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67303 +++ linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:04:18.000000000 -0400
67304 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67305 r->idiag_retrans = 0;
67306
67307 r->id.idiag_if = sk->sk_bound_dev_if;
67308 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67309 + r->id.idiag_cookie[0] = 0;
67310 + r->id.idiag_cookie[1] = 0;
67311 +#else
67312 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67313 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67314 +#endif
67315
67316 r->id.idiag_sport = inet->sport;
67317 r->id.idiag_dport = inet->dport;
67318 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67319 r->idiag_family = tw->tw_family;
67320 r->idiag_retrans = 0;
67321 r->id.idiag_if = tw->tw_bound_dev_if;
67322 +
67323 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67324 + r->id.idiag_cookie[0] = 0;
67325 + r->id.idiag_cookie[1] = 0;
67326 +#else
67327 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67328 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67329 +#endif
67330 +
67331 r->id.idiag_sport = tw->tw_sport;
67332 r->id.idiag_dport = tw->tw_dport;
67333 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67334 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67335 if (sk == NULL)
67336 goto unlock;
67337
67338 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67339 err = -ESTALE;
67340 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67341 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67342 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67343 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67344 goto out;
67345 +#endif
67346
67347 err = -ENOMEM;
67348 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67349 @@ -581,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
67350 r->idiag_retrans = req->retrans;
67351
67352 r->id.idiag_if = sk->sk_bound_dev_if;
67353 +
67354 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67355 + r->id.idiag_cookie[0] = 0;
67356 + r->id.idiag_cookie[1] = 0;
67357 +#else
67358 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67359 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67360 +#endif
67361
67362 tmo = req->expires - jiffies;
67363 if (tmo < 0)
67364 diff -urNp linux-2.6.32.41/net/ipv4/inet_hashtables.c linux-2.6.32.41/net/ipv4/inet_hashtables.c
67365 --- linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67366 +++ linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67367 @@ -18,11 +18,14 @@
67368 #include <linux/sched.h>
67369 #include <linux/slab.h>
67370 #include <linux/wait.h>
67371 +#include <linux/security.h>
67372
67373 #include <net/inet_connection_sock.h>
67374 #include <net/inet_hashtables.h>
67375 #include <net/ip.h>
67376
67377 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67378 +
67379 /*
67380 * Allocate and initialize a new local port bind bucket.
67381 * The bindhash mutex for snum's hash chain must be held here.
67382 @@ -490,6 +493,8 @@ ok:
67383 }
67384 spin_unlock(&head->lock);
67385
67386 + gr_update_task_in_ip_table(current, inet_sk(sk));
67387 +
67388 if (tw) {
67389 inet_twsk_deschedule(tw, death_row);
67390 inet_twsk_put(tw);
67391 diff -urNp linux-2.6.32.41/net/ipv4/inetpeer.c linux-2.6.32.41/net/ipv4/inetpeer.c
67392 --- linux-2.6.32.41/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67393 +++ linux-2.6.32.41/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67394 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67395 struct inet_peer *p, *n;
67396 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67397
67398 + pax_track_stack();
67399 +
67400 /* Look up for the address quickly. */
67401 read_lock_bh(&peer_pool_lock);
67402 p = lookup(daddr, NULL);
67403 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67404 return NULL;
67405 n->v4daddr = daddr;
67406 atomic_set(&n->refcnt, 1);
67407 - atomic_set(&n->rid, 0);
67408 + atomic_set_unchecked(&n->rid, 0);
67409 n->ip_id_count = secure_ip_id(daddr);
67410 n->tcp_ts_stamp = 0;
67411
67412 diff -urNp linux-2.6.32.41/net/ipv4/ip_fragment.c linux-2.6.32.41/net/ipv4/ip_fragment.c
67413 --- linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67414 +++ linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67415 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67416 return 0;
67417
67418 start = qp->rid;
67419 - end = atomic_inc_return(&peer->rid);
67420 + end = atomic_inc_return_unchecked(&peer->rid);
67421 qp->rid = end;
67422
67423 rc = qp->q.fragments && (end - start) > max;
67424 diff -urNp linux-2.6.32.41/net/ipv4/ip_sockglue.c linux-2.6.32.41/net/ipv4/ip_sockglue.c
67425 --- linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67426 +++ linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67427 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67428 int val;
67429 int len;
67430
67431 + pax_track_stack();
67432 +
67433 if (level != SOL_IP)
67434 return -EOPNOTSUPP;
67435
67436 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c
67437 --- linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67438 +++ linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67439 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67440 private = &tmp;
67441 }
67442 #endif
67443 + memset(&info, 0, sizeof(info));
67444 info.valid_hooks = t->valid_hooks;
67445 memcpy(info.hook_entry, private->hook_entry,
67446 sizeof(info.hook_entry));
67447 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c
67448 --- linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67449 +++ linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67450 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67451 private = &tmp;
67452 }
67453 #endif
67454 + memset(&info, 0, sizeof(info));
67455 info.valid_hooks = t->valid_hooks;
67456 memcpy(info.hook_entry, private->hook_entry,
67457 sizeof(info.hook_entry));
67458 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c
67459 --- linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67460 +++ linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67461 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67462
67463 *len = 0;
67464
67465 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67466 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67467 if (*octets == NULL) {
67468 if (net_ratelimit())
67469 printk("OOM in bsalg (%d)\n", __LINE__);
67470 diff -urNp linux-2.6.32.41/net/ipv4/raw.c linux-2.6.32.41/net/ipv4/raw.c
67471 --- linux-2.6.32.41/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67472 +++ linux-2.6.32.41/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67473 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67474 /* Charge it to the socket. */
67475
67476 if (sock_queue_rcv_skb(sk, skb) < 0) {
67477 - atomic_inc(&sk->sk_drops);
67478 + atomic_inc_unchecked(&sk->sk_drops);
67479 kfree_skb(skb);
67480 return NET_RX_DROP;
67481 }
67482 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67483 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67484 {
67485 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67486 - atomic_inc(&sk->sk_drops);
67487 + atomic_inc_unchecked(&sk->sk_drops);
67488 kfree_skb(skb);
67489 return NET_RX_DROP;
67490 }
67491 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67492
67493 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67494 {
67495 + struct icmp_filter filter;
67496 +
67497 + if (optlen < 0)
67498 + return -EINVAL;
67499 if (optlen > sizeof(struct icmp_filter))
67500 optlen = sizeof(struct icmp_filter);
67501 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67502 + if (copy_from_user(&filter, optval, optlen))
67503 return -EFAULT;
67504 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67505 +
67506 return 0;
67507 }
67508
67509 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67510 {
67511 + struct icmp_filter filter;
67512 int len, ret = -EFAULT;
67513
67514 if (get_user(len, optlen))
67515 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67516 if (len > sizeof(struct icmp_filter))
67517 len = sizeof(struct icmp_filter);
67518 ret = -EFAULT;
67519 + memcpy(&filter, &raw_sk(sk)->filter, len);
67520 if (put_user(len, optlen) ||
67521 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67522 + copy_to_user(optval, &filter, len))
67523 goto out;
67524 ret = 0;
67525 out: return ret;
67526 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67527 sk_wmem_alloc_get(sp),
67528 sk_rmem_alloc_get(sp),
67529 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67530 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67531 + atomic_read(&sp->sk_refcnt),
67532 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67533 + NULL,
67534 +#else
67535 + sp,
67536 +#endif
67537 + atomic_read_unchecked(&sp->sk_drops));
67538 }
67539
67540 static int raw_seq_show(struct seq_file *seq, void *v)
67541 diff -urNp linux-2.6.32.41/net/ipv4/route.c linux-2.6.32.41/net/ipv4/route.c
67542 --- linux-2.6.32.41/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67543 +++ linux-2.6.32.41/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67544 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67545
67546 static inline int rt_genid(struct net *net)
67547 {
67548 - return atomic_read(&net->ipv4.rt_genid);
67549 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67550 }
67551
67552 #ifdef CONFIG_PROC_FS
67553 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67554 unsigned char shuffle;
67555
67556 get_random_bytes(&shuffle, sizeof(shuffle));
67557 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67558 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67559 }
67560
67561 /*
67562 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67563
67564 static __net_init int rt_secret_timer_init(struct net *net)
67565 {
67566 - atomic_set(&net->ipv4.rt_genid,
67567 + atomic_set_unchecked(&net->ipv4.rt_genid,
67568 (int) ((num_physpages ^ (num_physpages>>8)) ^
67569 (jiffies ^ (jiffies >> 7))));
67570
67571 diff -urNp linux-2.6.32.41/net/ipv4/tcp.c linux-2.6.32.41/net/ipv4/tcp.c
67572 --- linux-2.6.32.41/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67573 +++ linux-2.6.32.41/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67574 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67575 int val;
67576 int err = 0;
67577
67578 + pax_track_stack();
67579 +
67580 /* This is a string value all the others are int's */
67581 if (optname == TCP_CONGESTION) {
67582 char name[TCP_CA_NAME_MAX];
67583 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67584 struct tcp_sock *tp = tcp_sk(sk);
67585 int val, len;
67586
67587 + pax_track_stack();
67588 +
67589 if (get_user(len, optlen))
67590 return -EFAULT;
67591
67592 diff -urNp linux-2.6.32.41/net/ipv4/tcp_ipv4.c linux-2.6.32.41/net/ipv4/tcp_ipv4.c
67593 --- linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67594 +++ linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67595 @@ -84,6 +84,9 @@
67596 int sysctl_tcp_tw_reuse __read_mostly;
67597 int sysctl_tcp_low_latency __read_mostly;
67598
67599 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67600 +extern int grsec_enable_blackhole;
67601 +#endif
67602
67603 #ifdef CONFIG_TCP_MD5SIG
67604 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67605 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67606 return 0;
67607
67608 reset:
67609 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67610 + if (!grsec_enable_blackhole)
67611 +#endif
67612 tcp_v4_send_reset(rsk, skb);
67613 discard:
67614 kfree_skb(skb);
67615 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67616 TCP_SKB_CB(skb)->sacked = 0;
67617
67618 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67619 - if (!sk)
67620 + if (!sk) {
67621 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67622 + ret = 1;
67623 +#endif
67624 goto no_tcp_socket;
67625 + }
67626
67627 process:
67628 - if (sk->sk_state == TCP_TIME_WAIT)
67629 + if (sk->sk_state == TCP_TIME_WAIT) {
67630 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67631 + ret = 2;
67632 +#endif
67633 goto do_time_wait;
67634 + }
67635
67636 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67637 goto discard_and_relse;
67638 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67639 bad_packet:
67640 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67641 } else {
67642 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67643 + if (!grsec_enable_blackhole || (ret == 1 &&
67644 + (skb->dev->flags & IFF_LOOPBACK)))
67645 +#endif
67646 tcp_v4_send_reset(NULL, skb);
67647 }
67648
67649 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67650 0, /* non standard timer */
67651 0, /* open_requests have no inode */
67652 atomic_read(&sk->sk_refcnt),
67653 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67654 + NULL,
67655 +#else
67656 req,
67657 +#endif
67658 len);
67659 }
67660
67661 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67662 sock_i_uid(sk),
67663 icsk->icsk_probes_out,
67664 sock_i_ino(sk),
67665 - atomic_read(&sk->sk_refcnt), sk,
67666 + atomic_read(&sk->sk_refcnt),
67667 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67668 + NULL,
67669 +#else
67670 + sk,
67671 +#endif
67672 jiffies_to_clock_t(icsk->icsk_rto),
67673 jiffies_to_clock_t(icsk->icsk_ack.ato),
67674 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67675 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67676 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67677 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67678 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67679 - atomic_read(&tw->tw_refcnt), tw, len);
67680 + atomic_read(&tw->tw_refcnt),
67681 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67682 + NULL,
67683 +#else
67684 + tw,
67685 +#endif
67686 + len);
67687 }
67688
67689 #define TMPSZ 150
67690 diff -urNp linux-2.6.32.41/net/ipv4/tcp_minisocks.c linux-2.6.32.41/net/ipv4/tcp_minisocks.c
67691 --- linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67692 +++ linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67693 @@ -26,6 +26,10 @@
67694 #include <net/inet_common.h>
67695 #include <net/xfrm.h>
67696
67697 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67698 +extern int grsec_enable_blackhole;
67699 +#endif
67700 +
67701 #ifdef CONFIG_SYSCTL
67702 #define SYNC_INIT 0 /* let the user enable it */
67703 #else
67704 @@ -672,6 +676,10 @@ listen_overflow:
67705
67706 embryonic_reset:
67707 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67708 +
67709 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67710 + if (!grsec_enable_blackhole)
67711 +#endif
67712 if (!(flg & TCP_FLAG_RST))
67713 req->rsk_ops->send_reset(sk, skb);
67714
67715 diff -urNp linux-2.6.32.41/net/ipv4/tcp_output.c linux-2.6.32.41/net/ipv4/tcp_output.c
67716 --- linux-2.6.32.41/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67717 +++ linux-2.6.32.41/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67718 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67719 __u8 *md5_hash_location;
67720 int mss;
67721
67722 + pax_track_stack();
67723 +
67724 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67725 if (skb == NULL)
67726 return NULL;
67727 diff -urNp linux-2.6.32.41/net/ipv4/tcp_probe.c linux-2.6.32.41/net/ipv4/tcp_probe.c
67728 --- linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67729 +++ linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67730 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67731 if (cnt + width >= len)
67732 break;
67733
67734 - if (copy_to_user(buf + cnt, tbuf, width))
67735 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67736 return -EFAULT;
67737 cnt += width;
67738 }
67739 diff -urNp linux-2.6.32.41/net/ipv4/tcp_timer.c linux-2.6.32.41/net/ipv4/tcp_timer.c
67740 --- linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67741 +++ linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67742 @@ -21,6 +21,10 @@
67743 #include <linux/module.h>
67744 #include <net/tcp.h>
67745
67746 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67747 +extern int grsec_lastack_retries;
67748 +#endif
67749 +
67750 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67751 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67752 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67753 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67754 }
67755 }
67756
67757 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67758 + if ((sk->sk_state == TCP_LAST_ACK) &&
67759 + (grsec_lastack_retries > 0) &&
67760 + (grsec_lastack_retries < retry_until))
67761 + retry_until = grsec_lastack_retries;
67762 +#endif
67763 +
67764 if (retransmits_timed_out(sk, retry_until)) {
67765 /* Has it gone just too far? */
67766 tcp_write_err(sk);
67767 diff -urNp linux-2.6.32.41/net/ipv4/udp.c linux-2.6.32.41/net/ipv4/udp.c
67768 --- linux-2.6.32.41/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67769 +++ linux-2.6.32.41/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67770 @@ -86,6 +86,7 @@
67771 #include <linux/types.h>
67772 #include <linux/fcntl.h>
67773 #include <linux/module.h>
67774 +#include <linux/security.h>
67775 #include <linux/socket.h>
67776 #include <linux/sockios.h>
67777 #include <linux/igmp.h>
67778 @@ -106,6 +107,10 @@
67779 #include <net/xfrm.h>
67780 #include "udp_impl.h"
67781
67782 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67783 +extern int grsec_enable_blackhole;
67784 +#endif
67785 +
67786 struct udp_table udp_table;
67787 EXPORT_SYMBOL(udp_table);
67788
67789 @@ -371,6 +376,9 @@ found:
67790 return s;
67791 }
67792
67793 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67794 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67795 +
67796 /*
67797 * This routine is called by the ICMP module when it gets some
67798 * sort of error condition. If err < 0 then the socket should
67799 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67800 dport = usin->sin_port;
67801 if (dport == 0)
67802 return -EINVAL;
67803 +
67804 + err = gr_search_udp_sendmsg(sk, usin);
67805 + if (err)
67806 + return err;
67807 } else {
67808 if (sk->sk_state != TCP_ESTABLISHED)
67809 return -EDESTADDRREQ;
67810 +
67811 + err = gr_search_udp_sendmsg(sk, NULL);
67812 + if (err)
67813 + return err;
67814 +
67815 daddr = inet->daddr;
67816 dport = inet->dport;
67817 /* Open fast path for connected socket.
67818 @@ -945,6 +962,10 @@ try_again:
67819 if (!skb)
67820 goto out;
67821
67822 + err = gr_search_udp_recvmsg(sk, skb);
67823 + if (err)
67824 + goto out_free;
67825 +
67826 ulen = skb->len - sizeof(struct udphdr);
67827 copied = len;
67828 if (copied > ulen)
67829 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
67830 if (rc == -ENOMEM) {
67831 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
67832 is_udplite);
67833 - atomic_inc(&sk->sk_drops);
67834 + atomic_inc_unchecked(&sk->sk_drops);
67835 }
67836 goto drop;
67837 }
67838 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
67839 goto csum_error;
67840
67841 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
67842 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67843 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
67844 +#endif
67845 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
67846
67847 /*
67848 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
67849 sk_wmem_alloc_get(sp),
67850 sk_rmem_alloc_get(sp),
67851 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67852 - atomic_read(&sp->sk_refcnt), sp,
67853 - atomic_read(&sp->sk_drops), len);
67854 + atomic_read(&sp->sk_refcnt),
67855 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67856 + NULL,
67857 +#else
67858 + sp,
67859 +#endif
67860 + atomic_read_unchecked(&sp->sk_drops), len);
67861 }
67862
67863 int udp4_seq_show(struct seq_file *seq, void *v)
67864 diff -urNp linux-2.6.32.41/net/ipv6/inet6_connection_sock.c linux-2.6.32.41/net/ipv6/inet6_connection_sock.c
67865 --- linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
67866 +++ linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
67867 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
67868 #ifdef CONFIG_XFRM
67869 {
67870 struct rt6_info *rt = (struct rt6_info *)dst;
67871 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
67872 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
67873 }
67874 #endif
67875 }
67876 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
67877 #ifdef CONFIG_XFRM
67878 if (dst) {
67879 struct rt6_info *rt = (struct rt6_info *)dst;
67880 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
67881 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
67882 sk->sk_dst_cache = NULL;
67883 dst_release(dst);
67884 dst = NULL;
67885 diff -urNp linux-2.6.32.41/net/ipv6/inet6_hashtables.c linux-2.6.32.41/net/ipv6/inet6_hashtables.c
67886 --- linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67887 +++ linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
67888 @@ -118,7 +118,7 @@ out:
67889 }
67890 EXPORT_SYMBOL(__inet6_lookup_established);
67891
67892 -static int inline compute_score(struct sock *sk, struct net *net,
67893 +static inline int compute_score(struct sock *sk, struct net *net,
67894 const unsigned short hnum,
67895 const struct in6_addr *daddr,
67896 const int dif)
67897 diff -urNp linux-2.6.32.41/net/ipv6/ipv6_sockglue.c linux-2.6.32.41/net/ipv6/ipv6_sockglue.c
67898 --- linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67899 +++ linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67900 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
67901 int val, valbool;
67902 int retv = -ENOPROTOOPT;
67903
67904 + pax_track_stack();
67905 +
67906 if (optval == NULL)
67907 val=0;
67908 else {
67909 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
67910 int len;
67911 int val;
67912
67913 + pax_track_stack();
67914 +
67915 if (ip6_mroute_opt(optname))
67916 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
67917
67918 diff -urNp linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c
67919 --- linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
67920 +++ linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
67921 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
67922 private = &tmp;
67923 }
67924 #endif
67925 + memset(&info, 0, sizeof(info));
67926 info.valid_hooks = t->valid_hooks;
67927 memcpy(info.hook_entry, private->hook_entry,
67928 sizeof(info.hook_entry));
67929 diff -urNp linux-2.6.32.41/net/ipv6/raw.c linux-2.6.32.41/net/ipv6/raw.c
67930 --- linux-2.6.32.41/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
67931 +++ linux-2.6.32.41/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
67932 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
67933 {
67934 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
67935 skb_checksum_complete(skb)) {
67936 - atomic_inc(&sk->sk_drops);
67937 + atomic_inc_unchecked(&sk->sk_drops);
67938 kfree_skb(skb);
67939 return NET_RX_DROP;
67940 }
67941
67942 /* Charge it to the socket. */
67943 if (sock_queue_rcv_skb(sk,skb)<0) {
67944 - atomic_inc(&sk->sk_drops);
67945 + atomic_inc_unchecked(&sk->sk_drops);
67946 kfree_skb(skb);
67947 return NET_RX_DROP;
67948 }
67949 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67950 struct raw6_sock *rp = raw6_sk(sk);
67951
67952 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
67953 - atomic_inc(&sk->sk_drops);
67954 + atomic_inc_unchecked(&sk->sk_drops);
67955 kfree_skb(skb);
67956 return NET_RX_DROP;
67957 }
67958 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67959
67960 if (inet->hdrincl) {
67961 if (skb_checksum_complete(skb)) {
67962 - atomic_inc(&sk->sk_drops);
67963 + atomic_inc_unchecked(&sk->sk_drops);
67964 kfree_skb(skb);
67965 return NET_RX_DROP;
67966 }
67967 @@ -518,7 +518,7 @@ csum_copy_err:
67968 as some normal condition.
67969 */
67970 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
67971 - atomic_inc(&sk->sk_drops);
67972 + atomic_inc_unchecked(&sk->sk_drops);
67973 goto out;
67974 }
67975
67976 @@ -600,7 +600,7 @@ out:
67977 return err;
67978 }
67979
67980 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
67981 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
67982 struct flowi *fl, struct rt6_info *rt,
67983 unsigned int flags)
67984 {
67985 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
67986 u16 proto;
67987 int err;
67988
67989 + pax_track_stack();
67990 +
67991 /* Rough check on arithmetic overflow,
67992 better check is made in ip6_append_data().
67993 */
67994 @@ -916,12 +918,17 @@ do_confirm:
67995 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
67996 char __user *optval, int optlen)
67997 {
67998 + struct icmp6_filter filter;
67999 +
68000 switch (optname) {
68001 case ICMPV6_FILTER:
68002 + if (optlen < 0)
68003 + return -EINVAL;
68004 if (optlen > sizeof(struct icmp6_filter))
68005 optlen = sizeof(struct icmp6_filter);
68006 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68007 + if (copy_from_user(&filter, optval, optlen))
68008 return -EFAULT;
68009 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68010 return 0;
68011 default:
68012 return -ENOPROTOOPT;
68013 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68014 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68015 char __user *optval, int __user *optlen)
68016 {
68017 + struct icmp6_filter filter;
68018 int len;
68019
68020 switch (optname) {
68021 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68022 len = sizeof(struct icmp6_filter);
68023 if (put_user(len, optlen))
68024 return -EFAULT;
68025 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68026 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68027 + if (copy_to_user(optval, &filter, len))
68028 return -EFAULT;
68029 return 0;
68030 default:
68031 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68032 0, 0L, 0,
68033 sock_i_uid(sp), 0,
68034 sock_i_ino(sp),
68035 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68036 + atomic_read(&sp->sk_refcnt),
68037 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68038 + NULL,
68039 +#else
68040 + sp,
68041 +#endif
68042 + atomic_read_unchecked(&sp->sk_drops));
68043 }
68044
68045 static int raw6_seq_show(struct seq_file *seq, void *v)
68046 diff -urNp linux-2.6.32.41/net/ipv6/tcp_ipv6.c linux-2.6.32.41/net/ipv6/tcp_ipv6.c
68047 --- linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68048 +++ linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68049 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68050 }
68051 #endif
68052
68053 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68054 +extern int grsec_enable_blackhole;
68055 +#endif
68056 +
68057 static void tcp_v6_hash(struct sock *sk)
68058 {
68059 if (sk->sk_state != TCP_CLOSE) {
68060 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68061 return 0;
68062
68063 reset:
68064 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68065 + if (!grsec_enable_blackhole)
68066 +#endif
68067 tcp_v6_send_reset(sk, skb);
68068 discard:
68069 if (opt_skb)
68070 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68071 TCP_SKB_CB(skb)->sacked = 0;
68072
68073 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68074 - if (!sk)
68075 + if (!sk) {
68076 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68077 + ret = 1;
68078 +#endif
68079 goto no_tcp_socket;
68080 + }
68081
68082 process:
68083 - if (sk->sk_state == TCP_TIME_WAIT)
68084 + if (sk->sk_state == TCP_TIME_WAIT) {
68085 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68086 + ret = 2;
68087 +#endif
68088 goto do_time_wait;
68089 + }
68090
68091 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68092 goto discard_and_relse;
68093 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68094 bad_packet:
68095 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68096 } else {
68097 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68098 + if (!grsec_enable_blackhole || (ret == 1 &&
68099 + (skb->dev->flags & IFF_LOOPBACK)))
68100 +#endif
68101 tcp_v6_send_reset(NULL, skb);
68102 }
68103
68104 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68105 uid,
68106 0, /* non standard timer */
68107 0, /* open_requests have no inode */
68108 - 0, req);
68109 + 0,
68110 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68111 + NULL
68112 +#else
68113 + req
68114 +#endif
68115 + );
68116 }
68117
68118 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68119 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68120 sock_i_uid(sp),
68121 icsk->icsk_probes_out,
68122 sock_i_ino(sp),
68123 - atomic_read(&sp->sk_refcnt), sp,
68124 + atomic_read(&sp->sk_refcnt),
68125 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68126 + NULL,
68127 +#else
68128 + sp,
68129 +#endif
68130 jiffies_to_clock_t(icsk->icsk_rto),
68131 jiffies_to_clock_t(icsk->icsk_ack.ato),
68132 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68133 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68134 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68135 tw->tw_substate, 0, 0,
68136 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68137 - atomic_read(&tw->tw_refcnt), tw);
68138 + atomic_read(&tw->tw_refcnt),
68139 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68140 + NULL
68141 +#else
68142 + tw
68143 +#endif
68144 + );
68145 }
68146
68147 static int tcp6_seq_show(struct seq_file *seq, void *v)
68148 diff -urNp linux-2.6.32.41/net/ipv6/udp.c linux-2.6.32.41/net/ipv6/udp.c
68149 --- linux-2.6.32.41/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68150 +++ linux-2.6.32.41/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68151 @@ -49,6 +49,10 @@
68152 #include <linux/seq_file.h>
68153 #include "udp_impl.h"
68154
68155 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68156 +extern int grsec_enable_blackhole;
68157 +#endif
68158 +
68159 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68160 {
68161 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68162 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68163 if (rc == -ENOMEM) {
68164 UDP6_INC_STATS_BH(sock_net(sk),
68165 UDP_MIB_RCVBUFERRORS, is_udplite);
68166 - atomic_inc(&sk->sk_drops);
68167 + atomic_inc_unchecked(&sk->sk_drops);
68168 }
68169 goto drop;
68170 }
68171 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68172 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68173 proto == IPPROTO_UDPLITE);
68174
68175 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68176 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68177 +#endif
68178 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68179
68180 kfree_skb(skb);
68181 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68182 0, 0L, 0,
68183 sock_i_uid(sp), 0,
68184 sock_i_ino(sp),
68185 - atomic_read(&sp->sk_refcnt), sp,
68186 - atomic_read(&sp->sk_drops));
68187 + atomic_read(&sp->sk_refcnt),
68188 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68189 + NULL,
68190 +#else
68191 + sp,
68192 +#endif
68193 + atomic_read_unchecked(&sp->sk_drops));
68194 }
68195
68196 int udp6_seq_show(struct seq_file *seq, void *v)
68197 diff -urNp linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c
68198 --- linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68199 +++ linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68200 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68201 add_wait_queue(&self->open_wait, &wait);
68202
68203 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68204 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68205 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68206
68207 /* As far as I can see, we protect open_count - Jean II */
68208 spin_lock_irqsave(&self->spinlock, flags);
68209 if (!tty_hung_up_p(filp)) {
68210 extra_count = 1;
68211 - self->open_count--;
68212 + local_dec(&self->open_count);
68213 }
68214 spin_unlock_irqrestore(&self->spinlock, flags);
68215 - self->blocked_open++;
68216 + local_inc(&self->blocked_open);
68217
68218 while (1) {
68219 if (tty->termios->c_cflag & CBAUD) {
68220 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68221 }
68222
68223 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68224 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68225 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68226
68227 schedule();
68228 }
68229 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68230 if (extra_count) {
68231 /* ++ is not atomic, so this should be protected - Jean II */
68232 spin_lock_irqsave(&self->spinlock, flags);
68233 - self->open_count++;
68234 + local_inc(&self->open_count);
68235 spin_unlock_irqrestore(&self->spinlock, flags);
68236 }
68237 - self->blocked_open--;
68238 + local_dec(&self->blocked_open);
68239
68240 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68241 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68242 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68243
68244 if (!retval)
68245 self->flags |= ASYNC_NORMAL_ACTIVE;
68246 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68247 }
68248 /* ++ is not atomic, so this should be protected - Jean II */
68249 spin_lock_irqsave(&self->spinlock, flags);
68250 - self->open_count++;
68251 + local_inc(&self->open_count);
68252
68253 tty->driver_data = self;
68254 self->tty = tty;
68255 spin_unlock_irqrestore(&self->spinlock, flags);
68256
68257 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68258 - self->line, self->open_count);
68259 + self->line, local_read(&self->open_count));
68260
68261 /* Not really used by us, but lets do it anyway */
68262 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68263 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68264 return;
68265 }
68266
68267 - if ((tty->count == 1) && (self->open_count != 1)) {
68268 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68269 /*
68270 * Uh, oh. tty->count is 1, which means that the tty
68271 * structure will be freed. state->count should always
68272 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68273 */
68274 IRDA_DEBUG(0, "%s(), bad serial port count; "
68275 "tty->count is 1, state->count is %d\n", __func__ ,
68276 - self->open_count);
68277 - self->open_count = 1;
68278 + local_read(&self->open_count));
68279 + local_set(&self->open_count, 1);
68280 }
68281
68282 - if (--self->open_count < 0) {
68283 + if (local_dec_return(&self->open_count) < 0) {
68284 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68285 - __func__, self->line, self->open_count);
68286 - self->open_count = 0;
68287 + __func__, self->line, local_read(&self->open_count));
68288 + local_set(&self->open_count, 0);
68289 }
68290 - if (self->open_count) {
68291 + if (local_read(&self->open_count)) {
68292 spin_unlock_irqrestore(&self->spinlock, flags);
68293
68294 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68295 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68296 tty->closing = 0;
68297 self->tty = NULL;
68298
68299 - if (self->blocked_open) {
68300 + if (local_read(&self->blocked_open)) {
68301 if (self->close_delay)
68302 schedule_timeout_interruptible(self->close_delay);
68303 wake_up_interruptible(&self->open_wait);
68304 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68305 spin_lock_irqsave(&self->spinlock, flags);
68306 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68307 self->tty = NULL;
68308 - self->open_count = 0;
68309 + local_set(&self->open_count, 0);
68310 spin_unlock_irqrestore(&self->spinlock, flags);
68311
68312 wake_up_interruptible(&self->open_wait);
68313 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68314 seq_putc(m, '\n');
68315
68316 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68317 - seq_printf(m, "Open count: %d\n", self->open_count);
68318 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68319 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68320 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68321
68322 diff -urNp linux-2.6.32.41/net/iucv/af_iucv.c linux-2.6.32.41/net/iucv/af_iucv.c
68323 --- linux-2.6.32.41/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68324 +++ linux-2.6.32.41/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68325 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68326
68327 write_lock_bh(&iucv_sk_list.lock);
68328
68329 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68330 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68331 while (__iucv_get_sock_by_name(name)) {
68332 sprintf(name, "%08x",
68333 - atomic_inc_return(&iucv_sk_list.autobind_name));
68334 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68335 }
68336
68337 write_unlock_bh(&iucv_sk_list.lock);
68338 diff -urNp linux-2.6.32.41/net/key/af_key.c linux-2.6.32.41/net/key/af_key.c
68339 --- linux-2.6.32.41/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68340 +++ linux-2.6.32.41/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68341 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68342 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68343 struct xfrm_kmaddress k;
68344
68345 + pax_track_stack();
68346 +
68347 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68348 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68349 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68350 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68351 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68352 else
68353 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68354 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68355 + NULL,
68356 +#else
68357 s,
68358 +#endif
68359 atomic_read(&s->sk_refcnt),
68360 sk_rmem_alloc_get(s),
68361 sk_wmem_alloc_get(s),
68362 diff -urNp linux-2.6.32.41/net/mac80211/cfg.c linux-2.6.32.41/net/mac80211/cfg.c
68363 --- linux-2.6.32.41/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68364 +++ linux-2.6.32.41/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68365 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68366 return err;
68367 }
68368
68369 -struct cfg80211_ops mac80211_config_ops = {
68370 +const struct cfg80211_ops mac80211_config_ops = {
68371 .add_virtual_intf = ieee80211_add_iface,
68372 .del_virtual_intf = ieee80211_del_iface,
68373 .change_virtual_intf = ieee80211_change_iface,
68374 diff -urNp linux-2.6.32.41/net/mac80211/cfg.h linux-2.6.32.41/net/mac80211/cfg.h
68375 --- linux-2.6.32.41/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68376 +++ linux-2.6.32.41/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68377 @@ -4,6 +4,6 @@
68378 #ifndef __CFG_H
68379 #define __CFG_H
68380
68381 -extern struct cfg80211_ops mac80211_config_ops;
68382 +extern const struct cfg80211_ops mac80211_config_ops;
68383
68384 #endif /* __CFG_H */
68385 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_key.c linux-2.6.32.41/net/mac80211/debugfs_key.c
68386 --- linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68387 +++ linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68388 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68389 size_t count, loff_t *ppos)
68390 {
68391 struct ieee80211_key *key = file->private_data;
68392 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68393 + int i, bufsize = 2 * key->conf.keylen + 2;
68394 char *buf = kmalloc(bufsize, GFP_KERNEL);
68395 char *p = buf;
68396 + ssize_t res;
68397 +
68398 + if (buf == NULL)
68399 + return -ENOMEM;
68400
68401 for (i = 0; i < key->conf.keylen; i++)
68402 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68403 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_sta.c linux-2.6.32.41/net/mac80211/debugfs_sta.c
68404 --- linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68405 +++ linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68406 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68407 int i;
68408 struct sta_info *sta = file->private_data;
68409
68410 + pax_track_stack();
68411 +
68412 spin_lock_bh(&sta->lock);
68413 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68414 sta->ampdu_mlme.dialog_token_allocator + 1);
68415 diff -urNp linux-2.6.32.41/net/mac80211/ieee80211_i.h linux-2.6.32.41/net/mac80211/ieee80211_i.h
68416 --- linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68417 +++ linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68418 @@ -25,6 +25,7 @@
68419 #include <linux/etherdevice.h>
68420 #include <net/cfg80211.h>
68421 #include <net/mac80211.h>
68422 +#include <asm/local.h>
68423 #include "key.h"
68424 #include "sta_info.h"
68425
68426 @@ -635,7 +636,7 @@ struct ieee80211_local {
68427 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68428 spinlock_t queue_stop_reason_lock;
68429
68430 - int open_count;
68431 + local_t open_count;
68432 int monitors, cooked_mntrs;
68433 /* number of interfaces with corresponding FIF_ flags */
68434 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68435 diff -urNp linux-2.6.32.41/net/mac80211/iface.c linux-2.6.32.41/net/mac80211/iface.c
68436 --- linux-2.6.32.41/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68437 +++ linux-2.6.32.41/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68438 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68439 break;
68440 }
68441
68442 - if (local->open_count == 0) {
68443 + if (local_read(&local->open_count) == 0) {
68444 res = drv_start(local);
68445 if (res)
68446 goto err_del_bss;
68447 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68448 * Validate the MAC address for this device.
68449 */
68450 if (!is_valid_ether_addr(dev->dev_addr)) {
68451 - if (!local->open_count)
68452 + if (!local_read(&local->open_count))
68453 drv_stop(local);
68454 return -EADDRNOTAVAIL;
68455 }
68456 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68457
68458 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68459
68460 - local->open_count++;
68461 + local_inc(&local->open_count);
68462 if (hw_reconf_flags) {
68463 ieee80211_hw_config(local, hw_reconf_flags);
68464 /*
68465 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68466 err_del_interface:
68467 drv_remove_interface(local, &conf);
68468 err_stop:
68469 - if (!local->open_count)
68470 + if (!local_read(&local->open_count))
68471 drv_stop(local);
68472 err_del_bss:
68473 sdata->bss = NULL;
68474 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68475 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68476 }
68477
68478 - local->open_count--;
68479 + local_dec(&local->open_count);
68480
68481 switch (sdata->vif.type) {
68482 case NL80211_IFTYPE_AP_VLAN:
68483 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68484
68485 ieee80211_recalc_ps(local, -1);
68486
68487 - if (local->open_count == 0) {
68488 + if (local_read(&local->open_count) == 0) {
68489 ieee80211_clear_tx_pending(local);
68490 ieee80211_stop_device(local);
68491
68492 diff -urNp linux-2.6.32.41/net/mac80211/main.c linux-2.6.32.41/net/mac80211/main.c
68493 --- linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68494 +++ linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68495 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68496 local->hw.conf.power_level = power;
68497 }
68498
68499 - if (changed && local->open_count) {
68500 + if (changed && local_read(&local->open_count)) {
68501 ret = drv_config(local, changed);
68502 /*
68503 * Goal:
68504 diff -urNp linux-2.6.32.41/net/mac80211/mlme.c linux-2.6.32.41/net/mac80211/mlme.c
68505 --- linux-2.6.32.41/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68506 +++ linux-2.6.32.41/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68507 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68508 bool have_higher_than_11mbit = false, newsta = false;
68509 u16 ap_ht_cap_flags;
68510
68511 + pax_track_stack();
68512 +
68513 /*
68514 * AssocResp and ReassocResp have identical structure, so process both
68515 * of them in this function.
68516 diff -urNp linux-2.6.32.41/net/mac80211/pm.c linux-2.6.32.41/net/mac80211/pm.c
68517 --- linux-2.6.32.41/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68518 +++ linux-2.6.32.41/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68519 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68520 }
68521
68522 /* stop hardware - this must stop RX */
68523 - if (local->open_count)
68524 + if (local_read(&local->open_count))
68525 ieee80211_stop_device(local);
68526
68527 local->suspended = true;
68528 diff -urNp linux-2.6.32.41/net/mac80211/rate.c linux-2.6.32.41/net/mac80211/rate.c
68529 --- linux-2.6.32.41/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68530 +++ linux-2.6.32.41/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68531 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68532 struct rate_control_ref *ref, *old;
68533
68534 ASSERT_RTNL();
68535 - if (local->open_count)
68536 + if (local_read(&local->open_count))
68537 return -EBUSY;
68538
68539 ref = rate_control_alloc(name, local);
68540 diff -urNp linux-2.6.32.41/net/mac80211/tx.c linux-2.6.32.41/net/mac80211/tx.c
68541 --- linux-2.6.32.41/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68542 +++ linux-2.6.32.41/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68543 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68544 return cpu_to_le16(dur);
68545 }
68546
68547 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68548 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68549 struct net_device *dev)
68550 {
68551 return local == wdev_priv(dev->ieee80211_ptr);
68552 diff -urNp linux-2.6.32.41/net/mac80211/util.c linux-2.6.32.41/net/mac80211/util.c
68553 --- linux-2.6.32.41/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68554 +++ linux-2.6.32.41/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68555 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68556 local->resuming = true;
68557
68558 /* restart hardware */
68559 - if (local->open_count) {
68560 + if (local_read(&local->open_count)) {
68561 /*
68562 * Upon resume hardware can sometimes be goofy due to
68563 * various platform / driver / bus issues, so restarting
68564 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c
68565 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68566 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68567 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68568 .open = ip_vs_app_open,
68569 .read = seq_read,
68570 .llseek = seq_lseek,
68571 - .release = seq_release,
68572 + .release = seq_release_net,
68573 };
68574 #endif
68575
68576 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c
68577 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68578 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68579 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68580 /* if the connection is not template and is created
68581 * by sync, preserve the activity flag.
68582 */
68583 - cp->flags |= atomic_read(&dest->conn_flags) &
68584 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68585 (~IP_VS_CONN_F_INACTIVE);
68586 else
68587 - cp->flags |= atomic_read(&dest->conn_flags);
68588 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68589 cp->dest = dest;
68590
68591 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68592 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68593 atomic_set(&cp->refcnt, 1);
68594
68595 atomic_set(&cp->n_control, 0);
68596 - atomic_set(&cp->in_pkts, 0);
68597 + atomic_set_unchecked(&cp->in_pkts, 0);
68598
68599 atomic_inc(&ip_vs_conn_count);
68600 if (flags & IP_VS_CONN_F_NO_CPORT)
68601 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68602 .open = ip_vs_conn_open,
68603 .read = seq_read,
68604 .llseek = seq_lseek,
68605 - .release = seq_release,
68606 + .release = seq_release_net,
68607 };
68608
68609 static const char *ip_vs_origin_name(unsigned flags)
68610 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68611 .open = ip_vs_conn_sync_open,
68612 .read = seq_read,
68613 .llseek = seq_lseek,
68614 - .release = seq_release,
68615 + .release = seq_release_net,
68616 };
68617
68618 #endif
68619 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68620
68621 /* Don't drop the entry if its number of incoming packets is not
68622 located in [0, 8] */
68623 - i = atomic_read(&cp->in_pkts);
68624 + i = atomic_read_unchecked(&cp->in_pkts);
68625 if (i > 8 || i < 0) return 0;
68626
68627 if (!todrop_rate[i]) return 0;
68628 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c
68629 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68630 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68631 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68632 ret = cp->packet_xmit(skb, cp, pp);
68633 /* do not touch skb anymore */
68634
68635 - atomic_inc(&cp->in_pkts);
68636 + atomic_inc_unchecked(&cp->in_pkts);
68637 ip_vs_conn_put(cp);
68638 return ret;
68639 }
68640 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68641 * Sync connection if it is about to close to
68642 * encorage the standby servers to update the connections timeout
68643 */
68644 - pkts = atomic_add_return(1, &cp->in_pkts);
68645 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68646 if (af == AF_INET &&
68647 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68648 (((cp->protocol != IPPROTO_TCP ||
68649 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c
68650 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68651 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68652 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68653 ip_vs_rs_hash(dest);
68654 write_unlock_bh(&__ip_vs_rs_lock);
68655 }
68656 - atomic_set(&dest->conn_flags, conn_flags);
68657 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68658
68659 /* bind the service */
68660 if (!dest->svc) {
68661 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68662 " %-7s %-6d %-10d %-10d\n",
68663 &dest->addr.in6,
68664 ntohs(dest->port),
68665 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68666 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68667 atomic_read(&dest->weight),
68668 atomic_read(&dest->activeconns),
68669 atomic_read(&dest->inactconns));
68670 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68671 "%-7s %-6d %-10d %-10d\n",
68672 ntohl(dest->addr.ip),
68673 ntohs(dest->port),
68674 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68675 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68676 atomic_read(&dest->weight),
68677 atomic_read(&dest->activeconns),
68678 atomic_read(&dest->inactconns));
68679 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68680 .open = ip_vs_info_open,
68681 .read = seq_read,
68682 .llseek = seq_lseek,
68683 - .release = seq_release_private,
68684 + .release = seq_release_net,
68685 };
68686
68687 #endif
68688 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68689 .open = ip_vs_stats_seq_open,
68690 .read = seq_read,
68691 .llseek = seq_lseek,
68692 - .release = single_release,
68693 + .release = single_release_net,
68694 };
68695
68696 #endif
68697 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68698
68699 entry.addr = dest->addr.ip;
68700 entry.port = dest->port;
68701 - entry.conn_flags = atomic_read(&dest->conn_flags);
68702 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68703 entry.weight = atomic_read(&dest->weight);
68704 entry.u_threshold = dest->u_threshold;
68705 entry.l_threshold = dest->l_threshold;
68706 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68707 unsigned char arg[128];
68708 int ret = 0;
68709
68710 + pax_track_stack();
68711 +
68712 if (!capable(CAP_NET_ADMIN))
68713 return -EPERM;
68714
68715 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68716 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68717
68718 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68719 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68720 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68721 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68722 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68723 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68724 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c
68725 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68726 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68727 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68728
68729 if (opt)
68730 memcpy(&cp->in_seq, opt, sizeof(*opt));
68731 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68732 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68733 cp->state = state;
68734 cp->old_state = cp->state;
68735 /*
68736 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c
68737 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68738 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68739 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68740 else
68741 rc = NF_ACCEPT;
68742 /* do not touch skb anymore */
68743 - atomic_inc(&cp->in_pkts);
68744 + atomic_inc_unchecked(&cp->in_pkts);
68745 goto out;
68746 }
68747
68748 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68749 else
68750 rc = NF_ACCEPT;
68751 /* do not touch skb anymore */
68752 - atomic_inc(&cp->in_pkts);
68753 + atomic_inc_unchecked(&cp->in_pkts);
68754 goto out;
68755 }
68756
68757 diff -urNp linux-2.6.32.41/net/netfilter/Kconfig linux-2.6.32.41/net/netfilter/Kconfig
68758 --- linux-2.6.32.41/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68759 +++ linux-2.6.32.41/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68760 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68761
68762 To compile it as a module, choose M here. If unsure, say N.
68763
68764 +config NETFILTER_XT_MATCH_GRADM
68765 + tristate '"gradm" match support'
68766 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68767 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68768 + ---help---
68769 + The gradm match allows to match on grsecurity RBAC being enabled.
68770 + It is useful when iptables rules are applied early on bootup to
68771 + prevent connections to the machine (except from a trusted host)
68772 + while the RBAC system is disabled.
68773 +
68774 config NETFILTER_XT_MATCH_HASHLIMIT
68775 tristate '"hashlimit" match support'
68776 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
68777 diff -urNp linux-2.6.32.41/net/netfilter/Makefile linux-2.6.32.41/net/netfilter/Makefile
68778 --- linux-2.6.32.41/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68779 +++ linux-2.6.32.41/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68780 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68781 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68782 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68783 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68784 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68785 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68786 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68787 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68788 diff -urNp linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c
68789 --- linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68790 +++ linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68791 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68792 static int
68793 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68794 struct nf_conntrack_tuple *tuple,
68795 - enum ctattr_tuple type, u_int8_t l3num)
68796 + enum ctattr_type type, u_int8_t l3num)
68797 {
68798 struct nlattr *tb[CTA_TUPLE_MAX+1];
68799 int err;
68800 diff -urNp linux-2.6.32.41/net/netfilter/nfnetlink_log.c linux-2.6.32.41/net/netfilter/nfnetlink_log.c
68801 --- linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68802 +++ linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68803 @@ -68,7 +68,7 @@ struct nfulnl_instance {
68804 };
68805
68806 static DEFINE_RWLOCK(instances_lock);
68807 -static atomic_t global_seq;
68808 +static atomic_unchecked_t global_seq;
68809
68810 #define INSTANCE_BUCKETS 16
68811 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68812 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68813 /* global sequence number */
68814 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68815 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68816 - htonl(atomic_inc_return(&global_seq)));
68817 + htonl(atomic_inc_return_unchecked(&global_seq)));
68818
68819 if (data_len) {
68820 struct nlattr *nla;
68821 diff -urNp linux-2.6.32.41/net/netfilter/xt_gradm.c linux-2.6.32.41/net/netfilter/xt_gradm.c
68822 --- linux-2.6.32.41/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68823 +++ linux-2.6.32.41/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68824 @@ -0,0 +1,51 @@
68825 +/*
68826 + * gradm match for netfilter
68827 + * Copyright © Zbigniew Krzystolik, 2010
68828 + *
68829 + * This program is free software; you can redistribute it and/or modify
68830 + * it under the terms of the GNU General Public License; either version
68831 + * 2 or 3 as published by the Free Software Foundation.
68832 + */
68833 +#include <linux/module.h>
68834 +#include <linux/moduleparam.h>
68835 +#include <linux/skbuff.h>
68836 +#include <linux/netfilter/x_tables.h>
68837 +#include <linux/grsecurity.h>
68838 +#include <linux/netfilter/xt_gradm.h>
68839 +
68840 +static bool
68841 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
68842 +{
68843 + const struct xt_gradm_mtinfo *info = par->matchinfo;
68844 + bool retval = false;
68845 + if (gr_acl_is_enabled())
68846 + retval = true;
68847 + return retval ^ info->invflags;
68848 +}
68849 +
68850 +static struct xt_match gradm_mt_reg __read_mostly = {
68851 + .name = "gradm",
68852 + .revision = 0,
68853 + .family = NFPROTO_UNSPEC,
68854 + .match = gradm_mt,
68855 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
68856 + .me = THIS_MODULE,
68857 +};
68858 +
68859 +static int __init gradm_mt_init(void)
68860 +{
68861 + return xt_register_match(&gradm_mt_reg);
68862 +}
68863 +
68864 +static void __exit gradm_mt_exit(void)
68865 +{
68866 + xt_unregister_match(&gradm_mt_reg);
68867 +}
68868 +
68869 +module_init(gradm_mt_init);
68870 +module_exit(gradm_mt_exit);
68871 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
68872 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
68873 +MODULE_LICENSE("GPL");
68874 +MODULE_ALIAS("ipt_gradm");
68875 +MODULE_ALIAS("ip6t_gradm");
68876 diff -urNp linux-2.6.32.41/net/netlink/af_netlink.c linux-2.6.32.41/net/netlink/af_netlink.c
68877 --- linux-2.6.32.41/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
68878 +++ linux-2.6.32.41/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
68879 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
68880 sk->sk_error_report(sk);
68881 }
68882 }
68883 - atomic_inc(&sk->sk_drops);
68884 + atomic_inc_unchecked(&sk->sk_drops);
68885 }
68886
68887 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
68888 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
68889 struct netlink_sock *nlk = nlk_sk(s);
68890
68891 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
68892 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68893 + NULL,
68894 +#else
68895 s,
68896 +#endif
68897 s->sk_protocol,
68898 nlk->pid,
68899 nlk->groups ? (u32)nlk->groups[0] : 0,
68900 sk_rmem_alloc_get(s),
68901 sk_wmem_alloc_get(s),
68902 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68903 + NULL,
68904 +#else
68905 nlk->cb,
68906 +#endif
68907 atomic_read(&s->sk_refcnt),
68908 - atomic_read(&s->sk_drops)
68909 + atomic_read_unchecked(&s->sk_drops)
68910 );
68911
68912 }
68913 diff -urNp linux-2.6.32.41/net/netrom/af_netrom.c linux-2.6.32.41/net/netrom/af_netrom.c
68914 --- linux-2.6.32.41/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
68915 +++ linux-2.6.32.41/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
68916 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
68917 struct sock *sk = sock->sk;
68918 struct nr_sock *nr = nr_sk(sk);
68919
68920 + memset(sax, 0, sizeof(*sax));
68921 lock_sock(sk);
68922 if (peer != 0) {
68923 if (sk->sk_state != TCP_ESTABLISHED) {
68924 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
68925 *uaddr_len = sizeof(struct full_sockaddr_ax25);
68926 } else {
68927 sax->fsa_ax25.sax25_family = AF_NETROM;
68928 - sax->fsa_ax25.sax25_ndigis = 0;
68929 sax->fsa_ax25.sax25_call = nr->source_addr;
68930 *uaddr_len = sizeof(struct sockaddr_ax25);
68931 }
68932 diff -urNp linux-2.6.32.41/net/packet/af_packet.c linux-2.6.32.41/net/packet/af_packet.c
68933 --- linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
68934 +++ linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
68935 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
68936
68937 seq_printf(seq,
68938 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
68939 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68940 + NULL,
68941 +#else
68942 s,
68943 +#endif
68944 atomic_read(&s->sk_refcnt),
68945 s->sk_type,
68946 ntohs(po->num),
68947 diff -urNp linux-2.6.32.41/net/phonet/af_phonet.c linux-2.6.32.41/net/phonet/af_phonet.c
68948 --- linux-2.6.32.41/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
68949 +++ linux-2.6.32.41/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
68950 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
68951 {
68952 struct phonet_protocol *pp;
68953
68954 - if (protocol >= PHONET_NPROTO)
68955 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68956 return NULL;
68957
68958 spin_lock(&proto_tab_lock);
68959 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
68960 {
68961 int err = 0;
68962
68963 - if (protocol >= PHONET_NPROTO)
68964 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68965 return -EINVAL;
68966
68967 err = proto_register(pp->prot, 1);
68968 diff -urNp linux-2.6.32.41/net/phonet/datagram.c linux-2.6.32.41/net/phonet/datagram.c
68969 --- linux-2.6.32.41/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
68970 +++ linux-2.6.32.41/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
68971 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
68972 if (err < 0) {
68973 kfree_skb(skb);
68974 if (err == -ENOMEM)
68975 - atomic_inc(&sk->sk_drops);
68976 + atomic_inc_unchecked(&sk->sk_drops);
68977 }
68978 return err ? NET_RX_DROP : NET_RX_SUCCESS;
68979 }
68980 diff -urNp linux-2.6.32.41/net/phonet/pep.c linux-2.6.32.41/net/phonet/pep.c
68981 --- linux-2.6.32.41/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
68982 +++ linux-2.6.32.41/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
68983 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
68984
68985 case PNS_PEP_CTRL_REQ:
68986 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
68987 - atomic_inc(&sk->sk_drops);
68988 + atomic_inc_unchecked(&sk->sk_drops);
68989 break;
68990 }
68991 __skb_pull(skb, 4);
68992 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
68993 if (!err)
68994 return 0;
68995 if (err == -ENOMEM)
68996 - atomic_inc(&sk->sk_drops);
68997 + atomic_inc_unchecked(&sk->sk_drops);
68998 break;
68999 }
69000
69001 if (pn->rx_credits == 0) {
69002 - atomic_inc(&sk->sk_drops);
69003 + atomic_inc_unchecked(&sk->sk_drops);
69004 err = -ENOBUFS;
69005 break;
69006 }
69007 diff -urNp linux-2.6.32.41/net/phonet/socket.c linux-2.6.32.41/net/phonet/socket.c
69008 --- linux-2.6.32.41/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69009 +++ linux-2.6.32.41/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69010 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69011 sk->sk_state,
69012 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69013 sock_i_uid(sk), sock_i_ino(sk),
69014 - atomic_read(&sk->sk_refcnt), sk,
69015 - atomic_read(&sk->sk_drops), &len);
69016 + atomic_read(&sk->sk_refcnt),
69017 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69018 + NULL,
69019 +#else
69020 + sk,
69021 +#endif
69022 + atomic_read_unchecked(&sk->sk_drops), &len);
69023 }
69024 seq_printf(seq, "%*s\n", 127 - len, "");
69025 return 0;
69026 diff -urNp linux-2.6.32.41/net/rds/cong.c linux-2.6.32.41/net/rds/cong.c
69027 --- linux-2.6.32.41/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69028 +++ linux-2.6.32.41/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69029 @@ -77,7 +77,7 @@
69030 * finds that the saved generation number is smaller than the global generation
69031 * number, it wakes up the process.
69032 */
69033 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69034 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69035
69036 /*
69037 * Congestion monitoring
69038 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69039 rdsdebug("waking map %p for %pI4\n",
69040 map, &map->m_addr);
69041 rds_stats_inc(s_cong_update_received);
69042 - atomic_inc(&rds_cong_generation);
69043 + atomic_inc_unchecked(&rds_cong_generation);
69044 if (waitqueue_active(&map->m_waitq))
69045 wake_up(&map->m_waitq);
69046 if (waitqueue_active(&rds_poll_waitq))
69047 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69048
69049 int rds_cong_updated_since(unsigned long *recent)
69050 {
69051 - unsigned long gen = atomic_read(&rds_cong_generation);
69052 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69053
69054 if (likely(*recent == gen))
69055 return 0;
69056 diff -urNp linux-2.6.32.41/net/rds/iw_rdma.c linux-2.6.32.41/net/rds/iw_rdma.c
69057 --- linux-2.6.32.41/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69058 +++ linux-2.6.32.41/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69059 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69060 struct rdma_cm_id *pcm_id;
69061 int rc;
69062
69063 + pax_track_stack();
69064 +
69065 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69066 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69067
69068 diff -urNp linux-2.6.32.41/net/rds/Kconfig linux-2.6.32.41/net/rds/Kconfig
69069 --- linux-2.6.32.41/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69070 +++ linux-2.6.32.41/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69071 @@ -1,7 +1,7 @@
69072
69073 config RDS
69074 tristate "The RDS Protocol (EXPERIMENTAL)"
69075 - depends on INET && EXPERIMENTAL
69076 + depends on INET && EXPERIMENTAL && BROKEN
69077 ---help---
69078 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69079 sequenced delivery of datagrams over Infiniband, iWARP,
69080 diff -urNp linux-2.6.32.41/net/rxrpc/af_rxrpc.c linux-2.6.32.41/net/rxrpc/af_rxrpc.c
69081 --- linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69082 +++ linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69083 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69084 __be32 rxrpc_epoch;
69085
69086 /* current debugging ID */
69087 -atomic_t rxrpc_debug_id;
69088 +atomic_unchecked_t rxrpc_debug_id;
69089
69090 /* count of skbs currently in use */
69091 atomic_t rxrpc_n_skbs;
69092 diff -urNp linux-2.6.32.41/net/rxrpc/ar-ack.c linux-2.6.32.41/net/rxrpc/ar-ack.c
69093 --- linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69094 +++ linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69095 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69096
69097 _enter("{%d,%d,%d,%d},",
69098 call->acks_hard, call->acks_unacked,
69099 - atomic_read(&call->sequence),
69100 + atomic_read_unchecked(&call->sequence),
69101 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69102
69103 stop = 0;
69104 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69105
69106 /* each Tx packet has a new serial number */
69107 sp->hdr.serial =
69108 - htonl(atomic_inc_return(&call->conn->serial));
69109 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69110
69111 hdr = (struct rxrpc_header *) txb->head;
69112 hdr->serial = sp->hdr.serial;
69113 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69114 */
69115 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69116 {
69117 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69118 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69119 }
69120
69121 /*
69122 @@ -627,7 +627,7 @@ process_further:
69123
69124 latest = ntohl(sp->hdr.serial);
69125 hard = ntohl(ack.firstPacket);
69126 - tx = atomic_read(&call->sequence);
69127 + tx = atomic_read_unchecked(&call->sequence);
69128
69129 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69130 latest,
69131 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69132 u32 abort_code = RX_PROTOCOL_ERROR;
69133 u8 *acks = NULL;
69134
69135 + pax_track_stack();
69136 +
69137 //printk("\n--------------------\n");
69138 _enter("{%d,%s,%lx} [%lu]",
69139 call->debug_id, rxrpc_call_states[call->state], call->events,
69140 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69141 goto maybe_reschedule;
69142
69143 send_ACK_with_skew:
69144 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69145 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69146 ntohl(ack.serial));
69147 send_ACK:
69148 mtu = call->conn->trans->peer->if_mtu;
69149 @@ -1171,7 +1173,7 @@ send_ACK:
69150 ackinfo.rxMTU = htonl(5692);
69151 ackinfo.jumbo_max = htonl(4);
69152
69153 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69154 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69155 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69156 ntohl(hdr.serial),
69157 ntohs(ack.maxSkew),
69158 @@ -1189,7 +1191,7 @@ send_ACK:
69159 send_message:
69160 _debug("send message");
69161
69162 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69163 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69164 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69165 send_message_2:
69166
69167 diff -urNp linux-2.6.32.41/net/rxrpc/ar-call.c linux-2.6.32.41/net/rxrpc/ar-call.c
69168 --- linux-2.6.32.41/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69169 +++ linux-2.6.32.41/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69170 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69171 spin_lock_init(&call->lock);
69172 rwlock_init(&call->state_lock);
69173 atomic_set(&call->usage, 1);
69174 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69175 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69176 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69177
69178 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69179 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connection.c linux-2.6.32.41/net/rxrpc/ar-connection.c
69180 --- linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69181 +++ linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69182 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69183 rwlock_init(&conn->lock);
69184 spin_lock_init(&conn->state_lock);
69185 atomic_set(&conn->usage, 1);
69186 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69187 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69188 conn->avail_calls = RXRPC_MAXCALLS;
69189 conn->size_align = 4;
69190 conn->header_size = sizeof(struct rxrpc_header);
69191 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connevent.c linux-2.6.32.41/net/rxrpc/ar-connevent.c
69192 --- linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69193 +++ linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69194 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69195
69196 len = iov[0].iov_len + iov[1].iov_len;
69197
69198 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69199 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69200 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69201
69202 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69203 diff -urNp linux-2.6.32.41/net/rxrpc/ar-input.c linux-2.6.32.41/net/rxrpc/ar-input.c
69204 --- linux-2.6.32.41/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69205 +++ linux-2.6.32.41/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69206 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69207 /* track the latest serial number on this connection for ACK packet
69208 * information */
69209 serial = ntohl(sp->hdr.serial);
69210 - hi_serial = atomic_read(&call->conn->hi_serial);
69211 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69212 while (serial > hi_serial)
69213 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69214 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69215 serial);
69216
69217 /* request ACK generation for any ACK or DATA packet that requests
69218 diff -urNp linux-2.6.32.41/net/rxrpc/ar-internal.h linux-2.6.32.41/net/rxrpc/ar-internal.h
69219 --- linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69220 +++ linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69221 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69222 int error; /* error code for local abort */
69223 int debug_id; /* debug ID for printks */
69224 unsigned call_counter; /* call ID counter */
69225 - atomic_t serial; /* packet serial number counter */
69226 - atomic_t hi_serial; /* highest serial number received */
69227 + atomic_unchecked_t serial; /* packet serial number counter */
69228 + atomic_unchecked_t hi_serial; /* highest serial number received */
69229 u8 avail_calls; /* number of calls available */
69230 u8 size_align; /* data size alignment (for security) */
69231 u8 header_size; /* rxrpc + security header size */
69232 @@ -346,7 +346,7 @@ struct rxrpc_call {
69233 spinlock_t lock;
69234 rwlock_t state_lock; /* lock for state transition */
69235 atomic_t usage;
69236 - atomic_t sequence; /* Tx data packet sequence counter */
69237 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69238 u32 abort_code; /* local/remote abort code */
69239 enum { /* current state of call */
69240 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69241 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69242 */
69243 extern atomic_t rxrpc_n_skbs;
69244 extern __be32 rxrpc_epoch;
69245 -extern atomic_t rxrpc_debug_id;
69246 +extern atomic_unchecked_t rxrpc_debug_id;
69247 extern struct workqueue_struct *rxrpc_workqueue;
69248
69249 /*
69250 diff -urNp linux-2.6.32.41/net/rxrpc/ar-key.c linux-2.6.32.41/net/rxrpc/ar-key.c
69251 --- linux-2.6.32.41/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69252 +++ linux-2.6.32.41/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69253 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69254 return ret;
69255
69256 plen -= sizeof(*token);
69257 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69258 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69259 if (!token)
69260 return -ENOMEM;
69261
69262 - token->kad = kmalloc(plen, GFP_KERNEL);
69263 + token->kad = kzalloc(plen, GFP_KERNEL);
69264 if (!token->kad) {
69265 kfree(token);
69266 return -ENOMEM;
69267 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69268 goto error;
69269
69270 ret = -ENOMEM;
69271 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69272 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69273 if (!token)
69274 goto error;
69275 - token->kad = kmalloc(plen, GFP_KERNEL);
69276 + token->kad = kzalloc(plen, GFP_KERNEL);
69277 if (!token->kad)
69278 goto error_free;
69279
69280 diff -urNp linux-2.6.32.41/net/rxrpc/ar-local.c linux-2.6.32.41/net/rxrpc/ar-local.c
69281 --- linux-2.6.32.41/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69282 +++ linux-2.6.32.41/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69283 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69284 spin_lock_init(&local->lock);
69285 rwlock_init(&local->services_lock);
69286 atomic_set(&local->usage, 1);
69287 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69288 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69289 memcpy(&local->srx, srx, sizeof(*srx));
69290 }
69291
69292 diff -urNp linux-2.6.32.41/net/rxrpc/ar-output.c linux-2.6.32.41/net/rxrpc/ar-output.c
69293 --- linux-2.6.32.41/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69294 +++ linux-2.6.32.41/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69295 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69296 sp->hdr.cid = call->cid;
69297 sp->hdr.callNumber = call->call_id;
69298 sp->hdr.seq =
69299 - htonl(atomic_inc_return(&call->sequence));
69300 + htonl(atomic_inc_return_unchecked(&call->sequence));
69301 sp->hdr.serial =
69302 - htonl(atomic_inc_return(&conn->serial));
69303 + htonl(atomic_inc_return_unchecked(&conn->serial));
69304 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69305 sp->hdr.userStatus = 0;
69306 sp->hdr.securityIndex = conn->security_ix;
69307 diff -urNp linux-2.6.32.41/net/rxrpc/ar-peer.c linux-2.6.32.41/net/rxrpc/ar-peer.c
69308 --- linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69309 +++ linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69310 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69311 INIT_LIST_HEAD(&peer->error_targets);
69312 spin_lock_init(&peer->lock);
69313 atomic_set(&peer->usage, 1);
69314 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69315 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69316 memcpy(&peer->srx, srx, sizeof(*srx));
69317
69318 rxrpc_assess_MTU_size(peer);
69319 diff -urNp linux-2.6.32.41/net/rxrpc/ar-proc.c linux-2.6.32.41/net/rxrpc/ar-proc.c
69320 --- linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69321 +++ linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69322 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69323 atomic_read(&conn->usage),
69324 rxrpc_conn_states[conn->state],
69325 key_serial(conn->key),
69326 - atomic_read(&conn->serial),
69327 - atomic_read(&conn->hi_serial));
69328 + atomic_read_unchecked(&conn->serial),
69329 + atomic_read_unchecked(&conn->hi_serial));
69330
69331 return 0;
69332 }
69333 diff -urNp linux-2.6.32.41/net/rxrpc/ar-transport.c linux-2.6.32.41/net/rxrpc/ar-transport.c
69334 --- linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69335 +++ linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69336 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69337 spin_lock_init(&trans->client_lock);
69338 rwlock_init(&trans->conn_lock);
69339 atomic_set(&trans->usage, 1);
69340 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69341 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69342
69343 if (peer->srx.transport.family == AF_INET) {
69344 switch (peer->srx.transport_type) {
69345 diff -urNp linux-2.6.32.41/net/rxrpc/rxkad.c linux-2.6.32.41/net/rxrpc/rxkad.c
69346 --- linux-2.6.32.41/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69347 +++ linux-2.6.32.41/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69348 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69349 u16 check;
69350 int nsg;
69351
69352 + pax_track_stack();
69353 +
69354 sp = rxrpc_skb(skb);
69355
69356 _enter("");
69357 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69358 u16 check;
69359 int nsg;
69360
69361 + pax_track_stack();
69362 +
69363 _enter("");
69364
69365 sp = rxrpc_skb(skb);
69366 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69367
69368 len = iov[0].iov_len + iov[1].iov_len;
69369
69370 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69371 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69372 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69373
69374 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69375 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69376
69377 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69378
69379 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69380 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69381 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69382
69383 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
69384 diff -urNp linux-2.6.32.41/net/sctp/proc.c linux-2.6.32.41/net/sctp/proc.c
69385 --- linux-2.6.32.41/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69386 +++ linux-2.6.32.41/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69387 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69388 sctp_for_each_hentry(epb, node, &head->chain) {
69389 ep = sctp_ep(epb);
69390 sk = epb->sk;
69391 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69392 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69393 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69394 + NULL, NULL,
69395 +#else
69396 + ep, sk,
69397 +#endif
69398 sctp_sk(sk)->type, sk->sk_state, hash,
69399 epb->bind_addr.port,
69400 sock_i_uid(sk), sock_i_ino(sk));
69401 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69402 seq_printf(seq,
69403 "%8p %8p %-3d %-3d %-2d %-4d "
69404 "%4d %8d %8d %7d %5lu %-5d %5d ",
69405 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69406 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69407 + NULL, NULL,
69408 +#else
69409 + assoc, sk,
69410 +#endif
69411 + sctp_sk(sk)->type, sk->sk_state,
69412 assoc->state, hash,
69413 assoc->assoc_id,
69414 assoc->sndbuf_used,
69415 diff -urNp linux-2.6.32.41/net/sctp/socket.c linux-2.6.32.41/net/sctp/socket.c
69416 --- linux-2.6.32.41/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69417 +++ linux-2.6.32.41/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69418 @@ -5802,7 +5802,6 @@ pp_found:
69419 */
69420 int reuse = sk->sk_reuse;
69421 struct sock *sk2;
69422 - struct hlist_node *node;
69423
69424 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69425 if (pp->fastreuse && sk->sk_reuse &&
69426 diff -urNp linux-2.6.32.41/net/socket.c linux-2.6.32.41/net/socket.c
69427 --- linux-2.6.32.41/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69428 +++ linux-2.6.32.41/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69429 @@ -87,6 +87,7 @@
69430 #include <linux/wireless.h>
69431 #include <linux/nsproxy.h>
69432 #include <linux/magic.h>
69433 +#include <linux/in.h>
69434
69435 #include <asm/uaccess.h>
69436 #include <asm/unistd.h>
69437 @@ -97,6 +98,21 @@
69438 #include <net/sock.h>
69439 #include <linux/netfilter.h>
69440
69441 +extern void gr_attach_curr_ip(const struct sock *sk);
69442 +extern int gr_handle_sock_all(const int family, const int type,
69443 + const int protocol);
69444 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69445 +extern int gr_handle_sock_server_other(const struct sock *sck);
69446 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69447 +extern int gr_search_connect(struct socket * sock,
69448 + struct sockaddr_in * addr);
69449 +extern int gr_search_bind(struct socket * sock,
69450 + struct sockaddr_in * addr);
69451 +extern int gr_search_listen(struct socket * sock);
69452 +extern int gr_search_accept(struct socket * sock);
69453 +extern int gr_search_socket(const int domain, const int type,
69454 + const int protocol);
69455 +
69456 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69457 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69458 unsigned long nr_segs, loff_t pos);
69459 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69460 mnt);
69461 }
69462
69463 -static struct vfsmount *sock_mnt __read_mostly;
69464 +struct vfsmount *sock_mnt __read_mostly;
69465
69466 static struct file_system_type sock_fs_type = {
69467 .name = "sockfs",
69468 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69469 return -EAFNOSUPPORT;
69470 if (type < 0 || type >= SOCK_MAX)
69471 return -EINVAL;
69472 + if (protocol < 0)
69473 + return -EINVAL;
69474
69475 /* Compatibility.
69476
69477 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69478 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69479 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69480
69481 + if(!gr_search_socket(family, type, protocol)) {
69482 + retval = -EACCES;
69483 + goto out;
69484 + }
69485 +
69486 + if (gr_handle_sock_all(family, type, protocol)) {
69487 + retval = -EACCES;
69488 + goto out;
69489 + }
69490 +
69491 retval = sock_create(family, type, protocol, &sock);
69492 if (retval < 0)
69493 goto out;
69494 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69495 if (sock) {
69496 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69497 if (err >= 0) {
69498 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69499 + err = -EACCES;
69500 + goto error;
69501 + }
69502 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69503 + if (err)
69504 + goto error;
69505 +
69506 err = security_socket_bind(sock,
69507 (struct sockaddr *)&address,
69508 addrlen);
69509 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69510 (struct sockaddr *)
69511 &address, addrlen);
69512 }
69513 +error:
69514 fput_light(sock->file, fput_needed);
69515 }
69516 return err;
69517 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69518 if ((unsigned)backlog > somaxconn)
69519 backlog = somaxconn;
69520
69521 + if (gr_handle_sock_server_other(sock->sk)) {
69522 + err = -EPERM;
69523 + goto error;
69524 + }
69525 +
69526 + err = gr_search_listen(sock);
69527 + if (err)
69528 + goto error;
69529 +
69530 err = security_socket_listen(sock, backlog);
69531 if (!err)
69532 err = sock->ops->listen(sock, backlog);
69533
69534 +error:
69535 fput_light(sock->file, fput_needed);
69536 }
69537 return err;
69538 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69539 newsock->type = sock->type;
69540 newsock->ops = sock->ops;
69541
69542 + if (gr_handle_sock_server_other(sock->sk)) {
69543 + err = -EPERM;
69544 + sock_release(newsock);
69545 + goto out_put;
69546 + }
69547 +
69548 + err = gr_search_accept(sock);
69549 + if (err) {
69550 + sock_release(newsock);
69551 + goto out_put;
69552 + }
69553 +
69554 /*
69555 * We don't need try_module_get here, as the listening socket (sock)
69556 * has the protocol module (sock->ops->owner) held.
69557 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69558 fd_install(newfd, newfile);
69559 err = newfd;
69560
69561 + gr_attach_curr_ip(newsock->sk);
69562 +
69563 out_put:
69564 fput_light(sock->file, fput_needed);
69565 out:
69566 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69567 int, addrlen)
69568 {
69569 struct socket *sock;
69570 + struct sockaddr *sck;
69571 struct sockaddr_storage address;
69572 int err, fput_needed;
69573
69574 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69575 if (err < 0)
69576 goto out_put;
69577
69578 + sck = (struct sockaddr *)&address;
69579 +
69580 + if (gr_handle_sock_client(sck)) {
69581 + err = -EACCES;
69582 + goto out_put;
69583 + }
69584 +
69585 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69586 + if (err)
69587 + goto out_put;
69588 +
69589 err =
69590 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69591 if (err)
69592 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69593 int err, ctl_len, iov_size, total_len;
69594 int fput_needed;
69595
69596 + pax_track_stack();
69597 +
69598 err = -EFAULT;
69599 if (MSG_CMSG_COMPAT & flags) {
69600 if (get_compat_msghdr(&msg_sys, msg_compat))
69601 diff -urNp linux-2.6.32.41/net/sunrpc/sched.c linux-2.6.32.41/net/sunrpc/sched.c
69602 --- linux-2.6.32.41/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69603 +++ linux-2.6.32.41/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69604 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69605 #ifdef RPC_DEBUG
69606 static void rpc_task_set_debuginfo(struct rpc_task *task)
69607 {
69608 - static atomic_t rpc_pid;
69609 + static atomic_unchecked_t rpc_pid;
69610
69611 task->tk_magic = RPC_TASK_MAGIC_ID;
69612 - task->tk_pid = atomic_inc_return(&rpc_pid);
69613 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69614 }
69615 #else
69616 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69617 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c
69618 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69619 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69620 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69621 static unsigned int min_max_inline = 4096;
69622 static unsigned int max_max_inline = 65536;
69623
69624 -atomic_t rdma_stat_recv;
69625 -atomic_t rdma_stat_read;
69626 -atomic_t rdma_stat_write;
69627 -atomic_t rdma_stat_sq_starve;
69628 -atomic_t rdma_stat_rq_starve;
69629 -atomic_t rdma_stat_rq_poll;
69630 -atomic_t rdma_stat_rq_prod;
69631 -atomic_t rdma_stat_sq_poll;
69632 -atomic_t rdma_stat_sq_prod;
69633 +atomic_unchecked_t rdma_stat_recv;
69634 +atomic_unchecked_t rdma_stat_read;
69635 +atomic_unchecked_t rdma_stat_write;
69636 +atomic_unchecked_t rdma_stat_sq_starve;
69637 +atomic_unchecked_t rdma_stat_rq_starve;
69638 +atomic_unchecked_t rdma_stat_rq_poll;
69639 +atomic_unchecked_t rdma_stat_rq_prod;
69640 +atomic_unchecked_t rdma_stat_sq_poll;
69641 +atomic_unchecked_t rdma_stat_sq_prod;
69642
69643 /* Temporary NFS request map and context caches */
69644 struct kmem_cache *svc_rdma_map_cachep;
69645 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69646 len -= *ppos;
69647 if (len > *lenp)
69648 len = *lenp;
69649 - if (len && copy_to_user(buffer, str_buf, len))
69650 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69651 return -EFAULT;
69652 *lenp = len;
69653 *ppos += len;
69654 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69655 {
69656 .procname = "rdma_stat_read",
69657 .data = &rdma_stat_read,
69658 - .maxlen = sizeof(atomic_t),
69659 + .maxlen = sizeof(atomic_unchecked_t),
69660 .mode = 0644,
69661 .proc_handler = &read_reset_stat,
69662 },
69663 {
69664 .procname = "rdma_stat_recv",
69665 .data = &rdma_stat_recv,
69666 - .maxlen = sizeof(atomic_t),
69667 + .maxlen = sizeof(atomic_unchecked_t),
69668 .mode = 0644,
69669 .proc_handler = &read_reset_stat,
69670 },
69671 {
69672 .procname = "rdma_stat_write",
69673 .data = &rdma_stat_write,
69674 - .maxlen = sizeof(atomic_t),
69675 + .maxlen = sizeof(atomic_unchecked_t),
69676 .mode = 0644,
69677 .proc_handler = &read_reset_stat,
69678 },
69679 {
69680 .procname = "rdma_stat_sq_starve",
69681 .data = &rdma_stat_sq_starve,
69682 - .maxlen = sizeof(atomic_t),
69683 + .maxlen = sizeof(atomic_unchecked_t),
69684 .mode = 0644,
69685 .proc_handler = &read_reset_stat,
69686 },
69687 {
69688 .procname = "rdma_stat_rq_starve",
69689 .data = &rdma_stat_rq_starve,
69690 - .maxlen = sizeof(atomic_t),
69691 + .maxlen = sizeof(atomic_unchecked_t),
69692 .mode = 0644,
69693 .proc_handler = &read_reset_stat,
69694 },
69695 {
69696 .procname = "rdma_stat_rq_poll",
69697 .data = &rdma_stat_rq_poll,
69698 - .maxlen = sizeof(atomic_t),
69699 + .maxlen = sizeof(atomic_unchecked_t),
69700 .mode = 0644,
69701 .proc_handler = &read_reset_stat,
69702 },
69703 {
69704 .procname = "rdma_stat_rq_prod",
69705 .data = &rdma_stat_rq_prod,
69706 - .maxlen = sizeof(atomic_t),
69707 + .maxlen = sizeof(atomic_unchecked_t),
69708 .mode = 0644,
69709 .proc_handler = &read_reset_stat,
69710 },
69711 {
69712 .procname = "rdma_stat_sq_poll",
69713 .data = &rdma_stat_sq_poll,
69714 - .maxlen = sizeof(atomic_t),
69715 + .maxlen = sizeof(atomic_unchecked_t),
69716 .mode = 0644,
69717 .proc_handler = &read_reset_stat,
69718 },
69719 {
69720 .procname = "rdma_stat_sq_prod",
69721 .data = &rdma_stat_sq_prod,
69722 - .maxlen = sizeof(atomic_t),
69723 + .maxlen = sizeof(atomic_unchecked_t),
69724 .mode = 0644,
69725 .proc_handler = &read_reset_stat,
69726 },
69727 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
69728 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
69729 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
69730 @@ -495,7 +495,7 @@ next_sge:
69731 svc_rdma_put_context(ctxt, 0);
69732 goto out;
69733 }
69734 - atomic_inc(&rdma_stat_read);
69735 + atomic_inc_unchecked(&rdma_stat_read);
69736
69737 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
69738 chl_map->ch[ch_no].count -= read_wr.num_sge;
69739 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69740 dto_q);
69741 list_del_init(&ctxt->dto_q);
69742 } else {
69743 - atomic_inc(&rdma_stat_rq_starve);
69744 + atomic_inc_unchecked(&rdma_stat_rq_starve);
69745 clear_bit(XPT_DATA, &xprt->xpt_flags);
69746 ctxt = NULL;
69747 }
69748 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69749 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
69750 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
69751 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
69752 - atomic_inc(&rdma_stat_recv);
69753 + atomic_inc_unchecked(&rdma_stat_recv);
69754
69755 /* Build up the XDR from the receive buffers. */
69756 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
69757 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c
69758 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
69759 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
69760 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
69761 write_wr.wr.rdma.remote_addr = to;
69762
69763 /* Post It */
69764 - atomic_inc(&rdma_stat_write);
69765 + atomic_inc_unchecked(&rdma_stat_write);
69766 if (svc_rdma_send(xprt, &write_wr))
69767 goto err;
69768 return 0;
69769 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c
69770 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
69771 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
69772 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
69773 return;
69774
69775 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
69776 - atomic_inc(&rdma_stat_rq_poll);
69777 + atomic_inc_unchecked(&rdma_stat_rq_poll);
69778
69779 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
69780 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
69781 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
69782 }
69783
69784 if (ctxt)
69785 - atomic_inc(&rdma_stat_rq_prod);
69786 + atomic_inc_unchecked(&rdma_stat_rq_prod);
69787
69788 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
69789 /*
69790 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
69791 return;
69792
69793 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
69794 - atomic_inc(&rdma_stat_sq_poll);
69795 + atomic_inc_unchecked(&rdma_stat_sq_poll);
69796 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
69797 if (wc.status != IB_WC_SUCCESS)
69798 /* Close the transport */
69799 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
69800 }
69801
69802 if (ctxt)
69803 - atomic_inc(&rdma_stat_sq_prod);
69804 + atomic_inc_unchecked(&rdma_stat_sq_prod);
69805 }
69806
69807 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
69808 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
69809 spin_lock_bh(&xprt->sc_lock);
69810 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
69811 spin_unlock_bh(&xprt->sc_lock);
69812 - atomic_inc(&rdma_stat_sq_starve);
69813 + atomic_inc_unchecked(&rdma_stat_sq_starve);
69814
69815 /* See if we can opportunistically reap SQ WR to make room */
69816 sq_cq_reap(xprt);
69817 diff -urNp linux-2.6.32.41/net/sysctl_net.c linux-2.6.32.41/net/sysctl_net.c
69818 --- linux-2.6.32.41/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
69819 +++ linux-2.6.32.41/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
69820 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
69821 struct ctl_table *table)
69822 {
69823 /* Allow network administrator to have same access as root. */
69824 - if (capable(CAP_NET_ADMIN)) {
69825 + if (capable_nolog(CAP_NET_ADMIN)) {
69826 int mode = (table->mode >> 6) & 7;
69827 return (mode << 6) | (mode << 3) | mode;
69828 }
69829 diff -urNp linux-2.6.32.41/net/unix/af_unix.c linux-2.6.32.41/net/unix/af_unix.c
69830 --- linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
69831 +++ linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
69832 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
69833 err = -ECONNREFUSED;
69834 if (!S_ISSOCK(inode->i_mode))
69835 goto put_fail;
69836 +
69837 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
69838 + err = -EACCES;
69839 + goto put_fail;
69840 + }
69841 +
69842 u = unix_find_socket_byinode(net, inode);
69843 if (!u)
69844 goto put_fail;
69845 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
69846 if (u) {
69847 struct dentry *dentry;
69848 dentry = unix_sk(u)->dentry;
69849 +
69850 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
69851 + err = -EPERM;
69852 + sock_put(u);
69853 + goto fail;
69854 + }
69855 +
69856 if (dentry)
69857 touch_atime(unix_sk(u)->mnt, dentry);
69858 } else
69859 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
69860 err = security_path_mknod(&nd.path, dentry, mode, 0);
69861 if (err)
69862 goto out_mknod_drop_write;
69863 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
69864 + err = -EACCES;
69865 + goto out_mknod_drop_write;
69866 + }
69867 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
69868 out_mknod_drop_write:
69869 mnt_drop_write(nd.path.mnt);
69870 if (err)
69871 goto out_mknod_dput;
69872 +
69873 + gr_handle_create(dentry, nd.path.mnt);
69874 +
69875 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
69876 dput(nd.path.dentry);
69877 nd.path.dentry = dentry;
69878 @@ -872,6 +892,10 @@ out_mknod_drop_write:
69879 goto out_unlock;
69880 }
69881
69882 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69883 + sk->sk_peercred.pid = current->pid;
69884 +#endif
69885 +
69886 list = &unix_socket_table[addr->hash];
69887 } else {
69888 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
69889 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
69890 unix_state_lock(s);
69891
69892 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
69893 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69894 + NULL,
69895 +#else
69896 s,
69897 +#endif
69898 atomic_read(&s->sk_refcnt),
69899 0,
69900 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
69901 diff -urNp linux-2.6.32.41/net/wireless/wext.c linux-2.6.32.41/net/wireless/wext.c
69902 --- linux-2.6.32.41/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
69903 +++ linux-2.6.32.41/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
69904 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
69905 */
69906
69907 /* Support for very large requests */
69908 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
69909 - (user_length > descr->max_tokens)) {
69910 + if (user_length > descr->max_tokens) {
69911 /* Allow userspace to GET more than max so
69912 * we can support any size GET requests.
69913 * There is still a limit : -ENOMEM.
69914 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
69915 }
69916 }
69917
69918 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
69919 - /*
69920 - * If this is a GET, but not NOMAX, it means that the extra
69921 - * data is not bounded by userspace, but by max_tokens. Thus
69922 - * set the length to max_tokens. This matches the extra data
69923 - * allocation.
69924 - * The driver should fill it with the number of tokens it
69925 - * provided, and it may check iwp->length rather than having
69926 - * knowledge of max_tokens. If the driver doesn't change the
69927 - * iwp->length, this ioctl just copies back max_token tokens
69928 - * filled with zeroes. Hopefully the driver isn't claiming
69929 - * them to be valid data.
69930 - */
69931 - iwp->length = descr->max_tokens;
69932 - }
69933 -
69934 err = handler(dev, info, (union iwreq_data *) iwp, extra);
69935
69936 iwp->length += essid_compat;
69937 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_policy.c linux-2.6.32.41/net/xfrm/xfrm_policy.c
69938 --- linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
69939 +++ linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
69940 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
69941 hlist_add_head(&policy->bydst, chain);
69942 xfrm_pol_hold(policy);
69943 net->xfrm.policy_count[dir]++;
69944 - atomic_inc(&flow_cache_genid);
69945 + atomic_inc_unchecked(&flow_cache_genid);
69946 if (delpol)
69947 __xfrm_policy_unlink(delpol, dir);
69948 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
69949 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
69950 write_unlock_bh(&xfrm_policy_lock);
69951
69952 if (ret && delete) {
69953 - atomic_inc(&flow_cache_genid);
69954 + atomic_inc_unchecked(&flow_cache_genid);
69955 xfrm_policy_kill(ret);
69956 }
69957 return ret;
69958 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
69959 write_unlock_bh(&xfrm_policy_lock);
69960
69961 if (ret && delete) {
69962 - atomic_inc(&flow_cache_genid);
69963 + atomic_inc_unchecked(&flow_cache_genid);
69964 xfrm_policy_kill(ret);
69965 }
69966 return ret;
69967 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
69968 }
69969
69970 }
69971 - atomic_inc(&flow_cache_genid);
69972 + atomic_inc_unchecked(&flow_cache_genid);
69973 out:
69974 write_unlock_bh(&xfrm_policy_lock);
69975 return err;
69976 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
69977 write_unlock_bh(&xfrm_policy_lock);
69978 if (pol) {
69979 if (dir < XFRM_POLICY_MAX)
69980 - atomic_inc(&flow_cache_genid);
69981 + atomic_inc_unchecked(&flow_cache_genid);
69982 xfrm_policy_kill(pol);
69983 return 0;
69984 }
69985 @@ -1477,7 +1477,7 @@ free_dst:
69986 goto out;
69987 }
69988
69989 -static int inline
69990 +static inline int
69991 xfrm_dst_alloc_copy(void **target, void *src, int size)
69992 {
69993 if (!*target) {
69994 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
69995 return 0;
69996 }
69997
69998 -static int inline
69999 +static inline int
70000 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70001 {
70002 #ifdef CONFIG_XFRM_SUB_POLICY
70003 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70004 #endif
70005 }
70006
70007 -static int inline
70008 +static inline int
70009 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70010 {
70011 #ifdef CONFIG_XFRM_SUB_POLICY
70012 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70013 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70014
70015 restart:
70016 - genid = atomic_read(&flow_cache_genid);
70017 + genid = atomic_read_unchecked(&flow_cache_genid);
70018 policy = NULL;
70019 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70020 pols[pi] = NULL;
70021 @@ -1680,7 +1680,7 @@ restart:
70022 goto error;
70023 }
70024 if (nx == -EAGAIN ||
70025 - genid != atomic_read(&flow_cache_genid)) {
70026 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70027 xfrm_pols_put(pols, npols);
70028 goto restart;
70029 }
70030 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_user.c linux-2.6.32.41/net/xfrm/xfrm_user.c
70031 --- linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70032 +++ linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70033 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70034 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70035 int i;
70036
70037 + pax_track_stack();
70038 +
70039 if (xp->xfrm_nr == 0)
70040 return 0;
70041
70042 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70043 int err;
70044 int n = 0;
70045
70046 + pax_track_stack();
70047 +
70048 if (attrs[XFRMA_MIGRATE] == NULL)
70049 return -EINVAL;
70050
70051 diff -urNp linux-2.6.32.41/samples/kobject/kset-example.c linux-2.6.32.41/samples/kobject/kset-example.c
70052 --- linux-2.6.32.41/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70053 +++ linux-2.6.32.41/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70054 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70055 }
70056
70057 /* Our custom sysfs_ops that we will associate with our ktype later on */
70058 -static struct sysfs_ops foo_sysfs_ops = {
70059 +static const struct sysfs_ops foo_sysfs_ops = {
70060 .show = foo_attr_show,
70061 .store = foo_attr_store,
70062 };
70063 diff -urNp linux-2.6.32.41/scripts/basic/fixdep.c linux-2.6.32.41/scripts/basic/fixdep.c
70064 --- linux-2.6.32.41/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70065 +++ linux-2.6.32.41/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70066 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70067
70068 static void parse_config_file(char *map, size_t len)
70069 {
70070 - int *end = (int *) (map + len);
70071 + unsigned int *end = (unsigned int *) (map + len);
70072 /* start at +1, so that p can never be < map */
70073 - int *m = (int *) map + 1;
70074 + unsigned int *m = (unsigned int *) map + 1;
70075 char *p, *q;
70076
70077 for (; m < end; m++) {
70078 @@ -371,7 +371,7 @@ static void print_deps(void)
70079 static void traps(void)
70080 {
70081 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70082 - int *p = (int *)test;
70083 + unsigned int *p = (unsigned int *)test;
70084
70085 if (*p != INT_CONF) {
70086 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70087 diff -urNp linux-2.6.32.41/scripts/Makefile.build linux-2.6.32.41/scripts/Makefile.build
70088 --- linux-2.6.32.41/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70089 +++ linux-2.6.32.41/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70090 @@ -59,7 +59,7 @@ endif
70091 endif
70092
70093 # Do not include host rules unless needed
70094 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70095 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70096 include scripts/Makefile.host
70097 endif
70098
70099 diff -urNp linux-2.6.32.41/scripts/Makefile.clean linux-2.6.32.41/scripts/Makefile.clean
70100 --- linux-2.6.32.41/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70101 +++ linux-2.6.32.41/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70102 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70103 __clean-files := $(extra-y) $(always) \
70104 $(targets) $(clean-files) \
70105 $(host-progs) \
70106 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70107 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70108 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70109
70110 # as clean-files is given relative to the current directory, this adds
70111 # a $(obj) prefix, except for absolute paths
70112 diff -urNp linux-2.6.32.41/scripts/Makefile.host linux-2.6.32.41/scripts/Makefile.host
70113 --- linux-2.6.32.41/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70114 +++ linux-2.6.32.41/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70115 @@ -31,6 +31,7 @@
70116 # Note: Shared libraries consisting of C++ files are not supported
70117
70118 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70119 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70120
70121 # C code
70122 # Executables compiled from a single .c file
70123 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70124 # Shared libaries (only .c supported)
70125 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70126 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70127 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70128 # Remove .so files from "xxx-objs"
70129 host-cobjs := $(filter-out %.so,$(host-cobjs))
70130
70131 diff -urNp linux-2.6.32.41/scripts/mod/file2alias.c linux-2.6.32.41/scripts/mod/file2alias.c
70132 --- linux-2.6.32.41/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70133 +++ linux-2.6.32.41/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70134 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70135 unsigned long size, unsigned long id_size,
70136 void *symval)
70137 {
70138 - int i;
70139 + unsigned int i;
70140
70141 if (size % id_size || size < id_size) {
70142 if (cross_build != 0)
70143 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70144 /* USB is special because the bcdDevice can be matched against a numeric range */
70145 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70146 static void do_usb_entry(struct usb_device_id *id,
70147 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70148 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70149 unsigned char range_lo, unsigned char range_hi,
70150 struct module *mod)
70151 {
70152 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70153 for (i = 0; i < count; i++) {
70154 const char *id = (char *)devs[i].id;
70155 char acpi_id[sizeof(devs[0].id)];
70156 - int j;
70157 + unsigned int j;
70158
70159 buf_printf(&mod->dev_table_buf,
70160 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70161 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70162
70163 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70164 const char *id = (char *)card->devs[j].id;
70165 - int i2, j2;
70166 + unsigned int i2, j2;
70167 int dup = 0;
70168
70169 if (!id[0])
70170 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70171 /* add an individual alias for every device entry */
70172 if (!dup) {
70173 char acpi_id[sizeof(card->devs[0].id)];
70174 - int k;
70175 + unsigned int k;
70176
70177 buf_printf(&mod->dev_table_buf,
70178 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70179 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70180 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70181 char *alias)
70182 {
70183 - int i, j;
70184 + unsigned int i, j;
70185
70186 sprintf(alias, "dmi*");
70187
70188 diff -urNp linux-2.6.32.41/scripts/mod/modpost.c linux-2.6.32.41/scripts/mod/modpost.c
70189 --- linux-2.6.32.41/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70190 +++ linux-2.6.32.41/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70191 @@ -835,6 +835,7 @@ enum mismatch {
70192 INIT_TO_EXIT,
70193 EXIT_TO_INIT,
70194 EXPORT_TO_INIT_EXIT,
70195 + DATA_TO_TEXT
70196 };
70197
70198 struct sectioncheck {
70199 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70200 .fromsec = { "__ksymtab*", NULL },
70201 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70202 .mismatch = EXPORT_TO_INIT_EXIT
70203 +},
70204 +/* Do not reference code from writable data */
70205 +{
70206 + .fromsec = { DATA_SECTIONS, NULL },
70207 + .tosec = { TEXT_SECTIONS, NULL },
70208 + .mismatch = DATA_TO_TEXT
70209 }
70210 };
70211
70212 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70213 continue;
70214 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70215 continue;
70216 - if (sym->st_value == addr)
70217 - return sym;
70218 /* Find a symbol nearby - addr are maybe negative */
70219 d = sym->st_value - addr;
70220 + if (d == 0)
70221 + return sym;
70222 if (d < 0)
70223 d = addr - sym->st_value;
70224 if (d < distance) {
70225 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70226 "Fix this by removing the %sannotation of %s "
70227 "or drop the export.\n",
70228 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70229 + case DATA_TO_TEXT:
70230 +/*
70231 + fprintf(stderr,
70232 + "The variable %s references\n"
70233 + "the %s %s%s%s\n",
70234 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70235 +*/
70236 + break;
70237 case NO_MISMATCH:
70238 /* To get warnings on missing members */
70239 break;
70240 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70241 va_end(ap);
70242 }
70243
70244 -void buf_write(struct buffer *buf, const char *s, int len)
70245 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70246 {
70247 if (buf->size - buf->pos < len) {
70248 buf->size += len + SZ;
70249 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70250 if (fstat(fileno(file), &st) < 0)
70251 goto close_write;
70252
70253 - if (st.st_size != b->pos)
70254 + if (st.st_size != (off_t)b->pos)
70255 goto close_write;
70256
70257 tmp = NOFAIL(malloc(b->pos));
70258 diff -urNp linux-2.6.32.41/scripts/mod/modpost.h linux-2.6.32.41/scripts/mod/modpost.h
70259 --- linux-2.6.32.41/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70260 +++ linux-2.6.32.41/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70261 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70262
70263 struct buffer {
70264 char *p;
70265 - int pos;
70266 - int size;
70267 + unsigned int pos;
70268 + unsigned int size;
70269 };
70270
70271 void __attribute__((format(printf, 2, 3)))
70272 buf_printf(struct buffer *buf, const char *fmt, ...);
70273
70274 void
70275 -buf_write(struct buffer *buf, const char *s, int len);
70276 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70277
70278 struct module {
70279 struct module *next;
70280 diff -urNp linux-2.6.32.41/scripts/mod/sumversion.c linux-2.6.32.41/scripts/mod/sumversion.c
70281 --- linux-2.6.32.41/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70282 +++ linux-2.6.32.41/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70283 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70284 goto out;
70285 }
70286
70287 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70288 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70289 warn("writing sum in %s failed: %s\n",
70290 filename, strerror(errno));
70291 goto out;
70292 diff -urNp linux-2.6.32.41/scripts/pnmtologo.c linux-2.6.32.41/scripts/pnmtologo.c
70293 --- linux-2.6.32.41/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70294 +++ linux-2.6.32.41/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70295 @@ -237,14 +237,14 @@ static void write_header(void)
70296 fprintf(out, " * Linux logo %s\n", logoname);
70297 fputs(" */\n\n", out);
70298 fputs("#include <linux/linux_logo.h>\n\n", out);
70299 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70300 + fprintf(out, "static unsigned char %s_data[] = {\n",
70301 logoname);
70302 }
70303
70304 static void write_footer(void)
70305 {
70306 fputs("\n};\n\n", out);
70307 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70308 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70309 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70310 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70311 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70312 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70313 fputs("\n};\n\n", out);
70314
70315 /* write logo clut */
70316 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70317 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70318 logoname);
70319 write_hex_cnt = 0;
70320 for (i = 0; i < logo_clutsize; i++) {
70321 diff -urNp linux-2.6.32.41/scripts/tags.sh linux-2.6.32.41/scripts/tags.sh
70322 --- linux-2.6.32.41/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
70323 +++ linux-2.6.32.41/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
70324 @@ -93,6 +93,11 @@ docscope()
70325 cscope -b -f cscope.out
70326 }
70327
70328 +dogtags()
70329 +{
70330 + all_sources | gtags -f -
70331 +}
70332 +
70333 exuberant()
70334 {
70335 all_sources | xargs $1 -a \
70336 @@ -164,6 +169,10 @@ case "$1" in
70337 docscope
70338 ;;
70339
70340 + "gtags")
70341 + dogtags
70342 + ;;
70343 +
70344 "tags")
70345 rm -f tags
70346 xtags ctags
70347 diff -urNp linux-2.6.32.41/security/capability.c linux-2.6.32.41/security/capability.c
70348 --- linux-2.6.32.41/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70349 +++ linux-2.6.32.41/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70350 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70351 }
70352 #endif /* CONFIG_AUDIT */
70353
70354 -struct security_operations default_security_ops = {
70355 +struct security_operations default_security_ops __read_only = {
70356 .name = "default",
70357 };
70358
70359 diff -urNp linux-2.6.32.41/security/commoncap.c linux-2.6.32.41/security/commoncap.c
70360 --- linux-2.6.32.41/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70361 +++ linux-2.6.32.41/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70362 @@ -27,7 +27,7 @@
70363 #include <linux/sched.h>
70364 #include <linux/prctl.h>
70365 #include <linux/securebits.h>
70366 -
70367 +#include <net/sock.h>
70368 /*
70369 * If a non-root user executes a setuid-root binary in
70370 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70371 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70372 }
70373 }
70374
70375 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70376 +
70377 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70378 {
70379 - NETLINK_CB(skb).eff_cap = current_cap();
70380 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70381 return 0;
70382 }
70383
70384 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70385 {
70386 const struct cred *cred = current_cred();
70387
70388 + if (gr_acl_enable_at_secure())
70389 + return 1;
70390 +
70391 if (cred->uid != 0) {
70392 if (bprm->cap_effective)
70393 return 1;
70394 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_api.c linux-2.6.32.41/security/integrity/ima/ima_api.c
70395 --- linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70396 +++ linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70397 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70398 int result;
70399
70400 /* can overflow, only indicator */
70401 - atomic_long_inc(&ima_htable.violations);
70402 + atomic_long_inc_unchecked(&ima_htable.violations);
70403
70404 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70405 if (!entry) {
70406 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_fs.c linux-2.6.32.41/security/integrity/ima/ima_fs.c
70407 --- linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70408 +++ linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70409 @@ -27,12 +27,12 @@
70410 static int valid_policy = 1;
70411 #define TMPBUFLEN 12
70412 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70413 - loff_t *ppos, atomic_long_t *val)
70414 + loff_t *ppos, atomic_long_unchecked_t *val)
70415 {
70416 char tmpbuf[TMPBUFLEN];
70417 ssize_t len;
70418
70419 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70420 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70421 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70422 }
70423
70424 diff -urNp linux-2.6.32.41/security/integrity/ima/ima.h linux-2.6.32.41/security/integrity/ima/ima.h
70425 --- linux-2.6.32.41/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70426 +++ linux-2.6.32.41/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70427 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70428 extern spinlock_t ima_queue_lock;
70429
70430 struct ima_h_table {
70431 - atomic_long_t len; /* number of stored measurements in the list */
70432 - atomic_long_t violations;
70433 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70434 + atomic_long_unchecked_t violations;
70435 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70436 };
70437 extern struct ima_h_table ima_htable;
70438 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_queue.c linux-2.6.32.41/security/integrity/ima/ima_queue.c
70439 --- linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70440 +++ linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70441 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70442 INIT_LIST_HEAD(&qe->later);
70443 list_add_tail_rcu(&qe->later, &ima_measurements);
70444
70445 - atomic_long_inc(&ima_htable.len);
70446 + atomic_long_inc_unchecked(&ima_htable.len);
70447 key = ima_hash_key(entry->digest);
70448 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70449 return 0;
70450 diff -urNp linux-2.6.32.41/security/Kconfig linux-2.6.32.41/security/Kconfig
70451 --- linux-2.6.32.41/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70452 +++ linux-2.6.32.41/security/Kconfig 2011-06-04 20:45:36.000000000 -0400
70453 @@ -4,6 +4,555 @@
70454
70455 menu "Security options"
70456
70457 +source grsecurity/Kconfig
70458 +
70459 +menu "PaX"
70460 +
70461 + config ARCH_TRACK_EXEC_LIMIT
70462 + bool
70463 +
70464 + config PAX_PER_CPU_PGD
70465 + bool
70466 +
70467 + config TASK_SIZE_MAX_SHIFT
70468 + int
70469 + depends on X86_64
70470 + default 47 if !PAX_PER_CPU_PGD
70471 + default 42 if PAX_PER_CPU_PGD
70472 +
70473 + config PAX_ENABLE_PAE
70474 + bool
70475 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70476 +
70477 +config PAX
70478 + bool "Enable various PaX features"
70479 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70480 + help
70481 + This allows you to enable various PaX features. PaX adds
70482 + intrusion prevention mechanisms to the kernel that reduce
70483 + the risks posed by exploitable memory corruption bugs.
70484 +
70485 +menu "PaX Control"
70486 + depends on PAX
70487 +
70488 +config PAX_SOFTMODE
70489 + bool 'Support soft mode'
70490 + select PAX_PT_PAX_FLAGS
70491 + help
70492 + Enabling this option will allow you to run PaX in soft mode, that
70493 + is, PaX features will not be enforced by default, only on executables
70494 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70495 + is the only way to mark executables for soft mode use.
70496 +
70497 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70498 + line option on boot. Furthermore you can control various PaX features
70499 + at runtime via the entries in /proc/sys/kernel/pax.
70500 +
70501 +config PAX_EI_PAX
70502 + bool 'Use legacy ELF header marking'
70503 + help
70504 + Enabling this option will allow you to control PaX features on
70505 + a per executable basis via the 'chpax' utility available at
70506 + http://pax.grsecurity.net/. The control flags will be read from
70507 + an otherwise reserved part of the ELF header. This marking has
70508 + numerous drawbacks (no support for soft-mode, toolchain does not
70509 + know about the non-standard use of the ELF header) therefore it
70510 + has been deprecated in favour of PT_PAX_FLAGS support.
70511 +
70512 + Note that if you enable PT_PAX_FLAGS marking support as well,
70513 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
70514 +
70515 +config PAX_PT_PAX_FLAGS
70516 + bool 'Use ELF program header marking'
70517 + help
70518 + Enabling this option will allow you to control PaX features on
70519 + a per executable basis via the 'paxctl' utility available at
70520 + http://pax.grsecurity.net/. The control flags will be read from
70521 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70522 + has the benefits of supporting both soft mode and being fully
70523 + integrated into the toolchain (the binutils patch is available
70524 + from http://pax.grsecurity.net).
70525 +
70526 + If your toolchain does not support PT_PAX_FLAGS markings,
70527 + you can create one in most cases with 'paxctl -C'.
70528 +
70529 + Note that if you enable the legacy EI_PAX marking support as well,
70530 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70531 +
70532 +choice
70533 + prompt 'MAC system integration'
70534 + default PAX_HAVE_ACL_FLAGS
70535 + help
70536 + Mandatory Access Control systems have the option of controlling
70537 + PaX flags on a per executable basis, choose the method supported
70538 + by your particular system.
70539 +
70540 + - "none": if your MAC system does not interact with PaX,
70541 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70542 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70543 +
70544 + NOTE: this option is for developers/integrators only.
70545 +
70546 + config PAX_NO_ACL_FLAGS
70547 + bool 'none'
70548 +
70549 + config PAX_HAVE_ACL_FLAGS
70550 + bool 'direct'
70551 +
70552 + config PAX_HOOK_ACL_FLAGS
70553 + bool 'hook'
70554 +endchoice
70555 +
70556 +endmenu
70557 +
70558 +menu "Non-executable pages"
70559 + depends on PAX
70560 +
70561 +config PAX_NOEXEC
70562 + bool "Enforce non-executable pages"
70563 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70564 + help
70565 + By design some architectures do not allow for protecting memory
70566 + pages against execution or even if they do, Linux does not make
70567 + use of this feature. In practice this means that if a page is
70568 + readable (such as the stack or heap) it is also executable.
70569 +
70570 + There is a well known exploit technique that makes use of this
70571 + fact and a common programming mistake where an attacker can
70572 + introduce code of his choice somewhere in the attacked program's
70573 + memory (typically the stack or the heap) and then execute it.
70574 +
70575 + If the attacked program was running with different (typically
70576 + higher) privileges than that of the attacker, then he can elevate
70577 + his own privilege level (e.g. get a root shell, write to files for
70578 + which he does not have write access to, etc).
70579 +
70580 + Enabling this option will let you choose from various features
70581 + that prevent the injection and execution of 'foreign' code in
70582 + a program.
70583 +
70584 + This will also break programs that rely on the old behaviour and
70585 + expect that dynamically allocated memory via the malloc() family
70586 + of functions is executable (which it is not). Notable examples
70587 + are the XFree86 4.x server, the java runtime and wine.
70588 +
70589 +config PAX_PAGEEXEC
70590 + bool "Paging based non-executable pages"
70591 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70592 + select S390_SWITCH_AMODE if S390
70593 + select S390_EXEC_PROTECT if S390
70594 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70595 + help
70596 + This implementation is based on the paging feature of the CPU.
70597 + On i386 without hardware non-executable bit support there is a
70598 + variable but usually low performance impact; on Intel's P4 core
70599 + based CPUs, however, it is very high, so you should not enable
70600 + this for kernels meant to be used on such CPUs.
70601 +
70602 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70603 + with hardware non-executable bit support there is no performance
70604 + impact, on ppc the impact is negligible.
70605 +
70606 + Note that several architectures require various emulations due to
70607 + badly designed userland ABIs; this causes a performance impact that
70608 + will disappear as soon as userland is fixed. For example, ppc
70609 + userland MUST have been built with secure-plt by a recent toolchain.
70610 +
70611 +config PAX_SEGMEXEC
70612 + bool "Segmentation based non-executable pages"
70613 + depends on PAX_NOEXEC && X86_32
70614 + help
70615 + This implementation is based on the segmentation feature of the
70616 + CPU and has a very small performance impact, however applications
70617 + will be limited to a 1.5 GB address space instead of the normal
70618 + 3 GB.
70619 +
70620 +config PAX_EMUTRAMP
70621 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70622 + default y if PARISC
70623 + help
70624 + There are some programs and libraries that for one reason or
70625 + another attempt to execute special small code snippets from
70626 + non-executable memory pages. Most notable examples are the
70627 + signal handler return code generated by the kernel itself and
70628 + the GCC trampolines.
70629 +
70630 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70631 + such programs will no longer work under your kernel.
70632 +
70633 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70634 + utilities to enable trampoline emulation for the affected programs
70635 + yet still have the protection provided by the non-executable pages.
70636 +
70637 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70638 + your system will not even boot.
70639 +
70640 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70641 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70642 + for the affected files.
70643 +
70644 + NOTE: enabling this feature *may* open up a loophole in the
70645 + protection provided by non-executable pages that an attacker
70646 + could abuse. Therefore the best solution is to not have any
70647 + files on your system that would require this option. This can
70648 + be achieved by not using libc5 (which relies on the kernel
70649 + signal handler return code) and not using or rewriting programs
70650 + that make use of the nested function implementation of GCC.
70651 + Skilled users can just fix GCC itself so that it implements
70652 + nested function calls in a way that does not interfere with PaX.
70653 +
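For reference, a minimal sketch (GNU C, illustrative only, not part of the patch) of the GCC trampoline case mentioned above: taking the address of a nested function that uses the enclosing frame makes gcc build a small trampoline on the stack, which is the kind of code EMUTRAMP emulates.

    #include <stdio.h>

    static void apply(void (*fn)(int), int v)
    {
            fn(v);
    }

    int main(void)
    {
            int sum = 0;

            void add(int v)         /* GNU C nested function */
            {
                    sum += v;       /* reaches the enclosing frame via a trampoline */
            }

            apply(add, 3);          /* taking its address forces a stack trampoline */
            printf("%d\n", sum);
            return 0;
    }
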
70654 +config PAX_EMUSIGRT
70655 + bool "Automatically emulate sigreturn trampolines"
70656 + depends on PAX_EMUTRAMP && PARISC
70657 + default y
70658 + help
70659 + Enabling this option will have the kernel automatically detect
70660 + and emulate signal return trampolines executing on the stack
70661 + that would otherwise lead to task termination.
70662 +
70663 + This solution is intended as a temporary one for users with
70664 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70665 + Modula-3 runtime, etc) or executables linked to such, basically
70666 + everything that does not specify its own SA_RESTORER function in
70667 + normal executable memory like glibc 2.1+ does.
70668 +
70669 + On parisc you MUST enable this option, otherwise your system will
70670 + not even boot.
70671 +
70672 + NOTE: this feature cannot be disabled on a per executable basis
70673 + and since it *does* open up a loophole in the protection provided
70674 + by non-executable pages, the best solution is to not have any
70675 + files on your system that would require this option.
70676 +
70677 +config PAX_MPROTECT
70678 + bool "Restrict mprotect()"
70679 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
70680 + help
70681 + Enabling this option will prevent programs from
70682 + - changing the executable status of memory pages that were
70683 + not originally created as executable,
70684 + - making read-only executable pages writable again,
70685 + - creating executable pages from anonymous memory,
70686 + - making read-only-after-relocations (RELRO) data pages writable again.
70687 +
70688 + You should say Y here to complete the protection provided by
70689 + the enforcement of non-executable pages.
70690 +
70691 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70692 + this feature on a per file basis.
70693 +
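A minimal userland sketch of the first restriction listed above (illustrative only, not part of the patch): asking mprotect() to make a writable anonymous mapping executable is expected to be refused when these restrictions are active.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
                    printf("mprotect denied: %s\n", strerror(errno));
            else
                    printf("mprotect allowed\n");
            return 0;
    }
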
70694 +config PAX_MPROTECT_COMPAT
70695 + bool "Use legacy/compat protection demoting (read help)"
70696 + depends on PAX_MPROTECT
70697 + default n
70698 + help
70699 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
70700 + by sending the proper error code to the application. For some broken
70701 + userland, this can cause problems with Python or other applications. The
70702 + current implementation however allows applications like clamav to
70703 + detect if JIT compilation/execution is allowed and to fall back gracefully
70704 + to an interpreter-based mode if it is not. While we encourage everyone
70705 + to use the current implementation as-is and push upstream to fix broken
70706 + userland (note that the RWX logging option can assist with this), in some
70707 + environments this may not be possible. Having to disable MPROTECT
70708 + completely on certain binaries reduces the security benefit of PaX,
70709 + so this option is provided for those environments to revert to the old
70710 + behavior.
70711 +
70712 +config PAX_ELFRELOCS
70713 + bool "Allow ELF text relocations (read help)"
70714 + depends on PAX_MPROTECT
70715 + default n
70716 + help
70717 + Non-executable pages and mprotect() restrictions are effective
70718 + in preventing the introduction of new executable code into an
70719 + attacked task's address space. There remain only two avenues
70720 + for this kind of attack: if the attacker can execute already
70721 + existing code in the attacked task then he can either have it
70722 + create and mmap() a file containing his code or have it mmap()
70723 + an already existing ELF library that does not have position
70724 + independent code in it and use mprotect() on it to make it
70725 + writable and copy his code there. While protecting against
70726 + the former approach is beyond PaX, the latter can be prevented
70727 + by having only PIC ELF libraries on one's system (which do not
70728 + need to relocate their code). If you are sure this is your case,
70729 + as is the case with all modern Linux distributions, then leave
70730 + this option disabled. You should say 'n' here.
70731 +
70732 +config PAX_ETEXECRELOCS
70733 + bool "Allow ELF ET_EXEC text relocations"
70734 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
70735 + select PAX_ELFRELOCS
70736 + default y
70737 + help
70738 + On some architectures there are incorrectly created applications
70739 + that require text relocations and would not work without enabling
70740 + this option. If you are an alpha, ia64 or parisc user, you should
70741 + enable this option and disable it once you have made sure that
70742 + none of your applications need it.
70743 +
70744 +config PAX_EMUPLT
70745 + bool "Automatically emulate ELF PLT"
70746 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
70747 + default y
70748 + help
70749 + Enabling this option will have the kernel automatically detect
70750 + and emulate the Procedure Linkage Table entries in ELF files.
70751 + On some architectures such entries are in writable memory, and
70752 + thus become non-executable, leading to task termination. Therefore
70753 + it is mandatory that you enable this option on alpha, parisc,
70754 + sparc and sparc64, otherwise your system would not even boot.
70755 +
70756 + NOTE: this feature *does* open up a loophole in the protection
70757 + provided by the non-executable pages, therefore the proper
70758 + solution is to modify the toolchain to produce a PLT that does
70759 + not need to be writable.
70760 +
70761 +config PAX_DLRESOLVE
70762 + bool 'Emulate old glibc resolver stub'
70763 + depends on PAX_EMUPLT && SPARC
70764 + default n
70765 + help
70766 + This option is needed if userland has an old glibc (before 2.4)
70767 + that puts a 'save' instruction into the runtime generated resolver
70768 + stub that needs special emulation.
70769 +
70770 +config PAX_KERNEXEC
70771 + bool "Enforce non-executable kernel pages"
70772 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
70773 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
70774 + help
70775 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
70776 + that is, enabling this option will make it harder to inject
70777 + and execute 'foreign' code in kernel memory itself.
70778 +
70779 + Note that on x86_64 kernels there is a known regression when
70780 + this feature and KVM/VMX are both enabled in the host kernel.
70781 +
70782 +config PAX_KERNEXEC_MODULE_TEXT
70783 + int "Minimum amount of memory reserved for module code"
70784 + default "4"
70785 + depends on PAX_KERNEXEC && X86_32 && MODULES
70786 + help
70787 + Due to implementation details the kernel must reserve a fixed
70788 + amount of memory for module code at compile time that cannot be
70789 + changed at runtime. Here you can specify the minimum amount
70790 + in MB that will be reserved. Due to the same implementation
70791 + details this size will always be rounded up to the next 2/4 MB
70792 + boundary (depends on PAE) so the actually available memory for
70793 + module code will usually be more than this minimum.
70794 +
70795 + The default 4 MB should be enough for most users but if you have
70796 + an excessive number of modules (e.g., most distribution configs
70797 + compile many drivers as modules) or use huge modules such as
70798 + nvidia's kernel driver, you will need to adjust this amount.
70799 + A good rule of thumb is to look at your currently loaded kernel
70800 + modules and add up their sizes.
70801 +
70802 +endmenu
70803 +
70804 +menu "Address Space Layout Randomization"
70805 + depends on PAX
70806 +
70807 +config PAX_ASLR
70808 + bool "Address Space Layout Randomization"
70809 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
70810 + help
70811 + Many if not most exploit techniques rely on the knowledge of
70812 + certain addresses in the attacked program. The following options
70813 + will allow the kernel to apply a certain amount of randomization
70814 + to specific parts of the program thereby forcing an attacker to
70815 + guess them in most cases. Any failed guess will most likely crash
70816 + the attacked program, which allows the kernel to detect such attempts
70817 + and react to them. PaX itself provides no reaction mechanisms;
70818 + instead it is strongly encouraged that you make use of Nergal's
70819 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
70820 + (http://www.grsecurity.net/) built-in crash detection features or
70821 + develop one yourself.
70822 +
70823 + By saying Y here you can choose to randomize the following areas:
70824 + - top of the task's kernel stack
70825 + - top of the task's userland stack
70826 + - base address for mmap() requests that do not specify one
70827 + (this includes all libraries)
70828 + - base address of the main executable
70829 +
70830 + It is strongly recommended to say Y here as address space layout
70831 + randomization has negligible impact on performance yet provides
70832 + very effective protection.
70833 +
70834 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70835 + this feature on a per file basis.
70836 +
70837 +config PAX_RANDKSTACK
70838 + bool "Randomize kernel stack base"
70839 + depends on PAX_ASLR && X86_TSC && X86
70840 + help
70841 + By saying Y here the kernel will randomize every task's kernel
70842 + stack on every system call. This will not only force an attacker
70843 + to guess it but also prevent him from making use of possibly
70844 + leaked information about it.
70845 +
70846 + Since the kernel stack is a rather scarce resource, randomization
70847 + may cause unexpected stack overflows, therefore you should very
70848 + carefully test your system. Note that once enabled in the kernel
70849 + configuration, this feature cannot be disabled on a per file basis.
70850 +
70851 +config PAX_RANDUSTACK
70852 + bool "Randomize user stack base"
70853 + depends on PAX_ASLR
70854 + help
70855 + By saying Y here the kernel will randomize every task's userland
70856 + stack. The randomization is done in two steps, where the second
70857 + one may shift the top of the stack by a large amount and
70858 + cause problems for programs that want to use lots of memory (more
70859 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
70860 + For this reason the second step can be controlled by 'chpax' or
70861 + 'paxctl' on a per file basis.
70862 +
70863 +config PAX_RANDMMAP
70864 + bool "Randomize mmap() base"
70865 + depends on PAX_ASLR
70866 + help
70867 + By saying Y here the kernel will use a randomized base address for
70868 + mmap() requests that do not specify one themselves. As a result
70869 + all dynamically loaded libraries will appear at random addresses
70870 + and therefore be harder to exploit by a technique where an attacker
70871 + attempts to execute library code for his purposes (e.g. spawn a
70872 + shell from an exploited program that is running at an elevated
70873 + privilege level).
70874 +
70875 + Furthermore, if a program is relinked as a dynamic ELF file, its
70876 + base address will be randomized as well, completing the full
70877 + randomization of the address space layout. Attacking such programs
70878 + becomes a guessing game. You can find an example of doing this at
70879 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
70880 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
70881 +
70882 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
70883 + feature on a per file basis.
70884 +
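A quick way to observe the effect described above (illustrative sketch, not part of the patch): run the following a few times and compare the printed addresses; with mmap() base randomization they are expected to differ between runs.

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            printf("anonymous mapping at %p\n", p);
            return 0;
    }
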
70885 +endmenu
70886 +
70887 +menu "Miscellaneous hardening features"
70888 +
70889 +config PAX_MEMORY_SANITIZE
70890 + bool "Sanitize all freed memory"
70891 + help
70892 + By saying Y here the kernel will erase memory pages as soon as they
70893 + are freed. This in turn reduces the lifetime of data stored in the
70894 + pages, making it less likely that sensitive information such as
70895 + passwords, cryptographic secrets, etc stay in memory for too long.
70896 +
70897 + This is especially useful for programs whose runtime is short;
70898 + long-lived processes and the kernel itself benefit from this as long
70899 + as they operate on whole memory pages and ensure timely freeing of
70900 + pages that may hold sensitive information.
70901 +
70902 + The tradeoff is a performance impact: on a single CPU system, kernel
70903 + compilation sees a 3% slowdown; other systems and workloads may vary,
70904 + and you are advised to test this feature on your expected workload
70905 + before deploying it.
70906 +
70907 + Note that this feature does not protect data stored in live pages,
70908 + e.g., process memory swapped to disk may stay there for a long time.
70909 +
70910 +config PAX_MEMORY_STACKLEAK
70911 + bool "Sanitize kernel stack"
70912 + depends on X86
70913 + help
70914 + By saying Y here the kernel will erase the kernel stack before it
70915 + returns from a system call. This in turn reduces the information
70916 + that a kernel stack leak bug can reveal.
70917 +
70918 + Note that such a bug can still leak information that was put on
70919 + the stack by the current system call (the one eventually triggering
70920 + the bug) but traces of earlier system calls on the kernel stack
70921 + cannot leak anymore.
70922 +
70923 + The tradeoff is a performance impact: on a single CPU system, kernel
70924 + compilation sees a 1% slowdown; other systems and workloads may vary,
70925 + and you are advised to test this feature on your expected workload
70926 + before deploying it.
70927 +
70928 + Note: full support for this feature requires gcc with plugin support
70929 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
70930 + is not supported). Using older gcc versions means that functions
70931 + with large enough stack frames may leave uninitialized memory behind
70932 + that may be exposed to a later syscall leaking the stack.
70933 +
70934 +config PAX_MEMORY_UDEREF
70935 + bool "Prevent invalid userland pointer dereference"
70936 + depends on X86 && !UML_X86 && !XEN
70937 + select PAX_PER_CPU_PGD if X86_64
70938 + help
70939 + By saying Y here the kernel will be prevented from dereferencing
70940 + userland pointers in contexts where the kernel expects only kernel
70941 + pointers. This is both a useful runtime debugging feature and a
70942 + security measure that prevents exploiting a class of kernel bugs.
70943 +
70944 + The tradeoff is that some virtualization solutions may experience
70945 + a huge slowdown and therefore you should not enable this feature
70946 + for kernels meant to run in such environments. Whether a given VM
70947 + solution is affected or not is best determined by simply trying it
70948 + out; the performance impact will be obvious right on boot as this
70949 + mechanism engages from very early on. A good rule of thumb is that
70950 + VMs running on CPUs without hardware virtualization support (i.e.,
70951 + the majority of IA-32 CPUs) will likely experience the slowdown.
70952 +
70953 +config PAX_REFCOUNT
70954 + bool "Prevent various kernel object reference counter overflows"
70955 + depends on GRKERNSEC && (X86 || SPARC64)
70956 + help
70957 + By saying Y here the kernel will detect and prevent overflowing
70958 + various (but not all) kinds of object reference counters. Such
70959 + overflows normally occur only due to bugs and are often, if
70960 + not always, exploitable.
70961 +
70962 + The tradeoff is that data structures protected by an overflowed
70963 + refcount will never be freed and therefore will leak memory. Note
70964 + that this leak also happens even without this protection but in
70965 + that case the overflow can eventually trigger the freeing of the
70966 + data structure while it is still being used elsewhere, resulting
70967 + in the exploitable situation that this feature prevents.
70968 +
70969 + Since this has a negligible performance impact, you should enable
70970 + this feature.
70971 +
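A userland sketch of the problem class described above (illustrative only, not part of the patch; a plain unsigned counter stands in for the kernel's atomic_t): once the counter wraps around, a single put() frees the object even though many references to it still exist.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            unsigned int refcount;
            char payload[32];
    };

    static void put(struct obj *o)
    {
            if (--o->refcount == 0) {
                    printf("object freed\n");
                    free(o);
            }
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            if (!o)
                    return 1;

            o->refcount = UINT_MAX; /* stand-in for attacker-driven increments */
            o->refcount++;          /* wraps around to 0 */
            o->refcount++;          /* one "new" reference now appears to own it */
            put(o);                 /* frees it although other references remain */
            return 0;
    }
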
70972 +config PAX_USERCOPY
70973 + bool "Harden heap object copies between kernel and userland"
70974 + depends on X86 || PPC || SPARC
70975 + depends on GRKERNSEC && (SLAB || SLUB)
70976 + help
70977 + By saying Y here the kernel will enforce the size of heap objects
70978 + when they are copied in either direction between the kernel and
70979 + userland, even if only a part of the heap object is copied.
70980 +
70981 + Specifically, this checking prevents information leaking from the
70982 + kernel heap during kernel to userland copies (if the kernel heap
70983 + object is otherwise fully initialized) and prevents kernel heap
70984 + overflows during userland to kernel copies.
70985 +
70986 + Note that the current implementation provides the strictest bounds
70987 + checks for the SLUB allocator.
70988 +
70989 + Enabling this option also enables per-slab cache protection against
70990 + data in a given cache being copied to or from userland via the
70991 + usercopy accessors. Though the whitelist of regions will be reduced
70992 + over time, it notably protects important data structures like task structs.
70993 +
70994 +
70995 + If frame pointers are enabled on x86, this option will also
70996 + restrict copies into and out of the kernel stack to local variables
70997 + within a single frame.
70998 +
70999 + Since this has a negligible performance impact, you should enable
71000 + this feature.
71001 +
71002 +endmenu
71003 +
71004 +endmenu
71005 +
71006 config KEYS
71007 bool "Enable access key retention support"
71008 help
71009 @@ -146,7 +695,7 @@ config INTEL_TXT
71010 config LSM_MMAP_MIN_ADDR
71011 int "Low address space for LSM to protect from user allocation"
71012 depends on SECURITY && SECURITY_SELINUX
71013 - default 65536
71014 + default 32768
71015 help
71016 This is the portion of low virtual memory which should be protected
71017 from userspace allocation. Keeping a user from writing to low pages
71018 diff -urNp linux-2.6.32.41/security/keys/keyring.c linux-2.6.32.41/security/keys/keyring.c
71019 --- linux-2.6.32.41/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71020 +++ linux-2.6.32.41/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71021 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71022 ret = -EFAULT;
71023
71024 for (loop = 0; loop < klist->nkeys; loop++) {
71025 + key_serial_t serial;
71026 key = klist->keys[loop];
71027 + serial = key->serial;
71028
71029 tmp = sizeof(key_serial_t);
71030 if (tmp > buflen)
71031 tmp = buflen;
71032
71033 - if (copy_to_user(buffer,
71034 - &key->serial,
71035 - tmp) != 0)
71036 + if (copy_to_user(buffer, &serial, tmp))
71037 goto error;
71038
71039 buflen -= tmp;
71040 diff -urNp linux-2.6.32.41/security/min_addr.c linux-2.6.32.41/security/min_addr.c
71041 --- linux-2.6.32.41/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71042 +++ linux-2.6.32.41/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71043 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71044 */
71045 static void update_mmap_min_addr(void)
71046 {
71047 +#ifndef SPARC
71048 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71049 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71050 mmap_min_addr = dac_mmap_min_addr;
71051 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71052 #else
71053 mmap_min_addr = dac_mmap_min_addr;
71054 #endif
71055 +#endif
71056 }
71057
71058 /*
71059 diff -urNp linux-2.6.32.41/security/root_plug.c linux-2.6.32.41/security/root_plug.c
71060 --- linux-2.6.32.41/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71061 +++ linux-2.6.32.41/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71062 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71063 return 0;
71064 }
71065
71066 -static struct security_operations rootplug_security_ops = {
71067 +static struct security_operations rootplug_security_ops __read_only = {
71068 .bprm_check_security = rootplug_bprm_check_security,
71069 };
71070
71071 diff -urNp linux-2.6.32.41/security/security.c linux-2.6.32.41/security/security.c
71072 --- linux-2.6.32.41/security/security.c 2011-03-27 14:31:47.000000000 -0400
71073 +++ linux-2.6.32.41/security/security.c 2011-04-17 15:56:46.000000000 -0400
71074 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71075 extern struct security_operations default_security_ops;
71076 extern void security_fixup_ops(struct security_operations *ops);
71077
71078 -struct security_operations *security_ops; /* Initialized to NULL */
71079 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71080
71081 static inline int verify(struct security_operations *ops)
71082 {
71083 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71084 * If there is already a security module registered with the kernel,
71085 * an error will be returned. Otherwise %0 is returned on success.
71086 */
71087 -int register_security(struct security_operations *ops)
71088 +int __init register_security(struct security_operations *ops)
71089 {
71090 if (verify(ops)) {
71091 printk(KERN_DEBUG "%s could not verify "
71092 diff -urNp linux-2.6.32.41/security/selinux/hooks.c linux-2.6.32.41/security/selinux/hooks.c
71093 --- linux-2.6.32.41/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71094 +++ linux-2.6.32.41/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71095 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71096 * Minimal support for a secondary security module,
71097 * just to allow the use of the capability module.
71098 */
71099 -static struct security_operations *secondary_ops;
71100 +static struct security_operations *secondary_ops __read_only;
71101
71102 /* Lists of inode and superblock security structures initialized
71103 before the policy was loaded. */
71104 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71105
71106 #endif
71107
71108 -static struct security_operations selinux_ops = {
71109 +static struct security_operations selinux_ops __read_only = {
71110 .name = "selinux",
71111
71112 .ptrace_access_check = selinux_ptrace_access_check,
71113 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71114 avc_disable();
71115
71116 /* Reset security_ops to the secondary module, dummy or capability. */
71117 + pax_open_kernel();
71118 security_ops = secondary_ops;
71119 + pax_close_kernel();
71120
71121 /* Unregister netfilter hooks. */
71122 selinux_nf_ip_exit();
71123 diff -urNp linux-2.6.32.41/security/selinux/include/xfrm.h linux-2.6.32.41/security/selinux/include/xfrm.h
71124 --- linux-2.6.32.41/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71125 +++ linux-2.6.32.41/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71126 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71127
71128 static inline void selinux_xfrm_notify_policyload(void)
71129 {
71130 - atomic_inc(&flow_cache_genid);
71131 + atomic_inc_unchecked(&flow_cache_genid);
71132 }
71133 #else
71134 static inline int selinux_xfrm_enabled(void)
71135 diff -urNp linux-2.6.32.41/security/selinux/ss/services.c linux-2.6.32.41/security/selinux/ss/services.c
71136 --- linux-2.6.32.41/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71137 +++ linux-2.6.32.41/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71138 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71139 int rc = 0;
71140 struct policy_file file = { data, len }, *fp = &file;
71141
71142 + pax_track_stack();
71143 +
71144 if (!ss_initialized) {
71145 avtab_cache_init();
71146 if (policydb_read(&policydb, fp)) {
71147 diff -urNp linux-2.6.32.41/security/smack/smack_lsm.c linux-2.6.32.41/security/smack/smack_lsm.c
71148 --- linux-2.6.32.41/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71149 +++ linux-2.6.32.41/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71150 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71151 return 0;
71152 }
71153
71154 -struct security_operations smack_ops = {
71155 +struct security_operations smack_ops __read_only = {
71156 .name = "smack",
71157
71158 .ptrace_access_check = smack_ptrace_access_check,
71159 diff -urNp linux-2.6.32.41/security/tomoyo/tomoyo.c linux-2.6.32.41/security/tomoyo/tomoyo.c
71160 --- linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71161 +++ linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71162 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71163 * tomoyo_security_ops is a "struct security_operations" which is used for
71164 * registering TOMOYO.
71165 */
71166 -static struct security_operations tomoyo_security_ops = {
71167 +static struct security_operations tomoyo_security_ops __read_only = {
71168 .name = "tomoyo",
71169 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71170 .cred_prepare = tomoyo_cred_prepare,
71171 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.c linux-2.6.32.41/sound/aoa/codecs/onyx.c
71172 --- linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71173 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71174 @@ -53,7 +53,7 @@ struct onyx {
71175 spdif_locked:1,
71176 analog_locked:1,
71177 original_mute:2;
71178 - int open_count;
71179 + local_t open_count;
71180 struct codec_info *codec_info;
71181
71182 /* mutex serializes concurrent access to the device
71183 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71184 struct onyx *onyx = cii->codec_data;
71185
71186 mutex_lock(&onyx->mutex);
71187 - onyx->open_count++;
71188 + local_inc(&onyx->open_count);
71189 mutex_unlock(&onyx->mutex);
71190
71191 return 0;
71192 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71193 struct onyx *onyx = cii->codec_data;
71194
71195 mutex_lock(&onyx->mutex);
71196 - onyx->open_count--;
71197 - if (!onyx->open_count)
71198 + if (local_dec_and_test(&onyx->open_count))
71199 onyx->spdif_locked = onyx->analog_locked = 0;
71200 mutex_unlock(&onyx->mutex);
71201
71202 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.h linux-2.6.32.41/sound/aoa/codecs/onyx.h
71203 --- linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71204 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71205 @@ -11,6 +11,7 @@
71206 #include <linux/i2c.h>
71207 #include <asm/pmac_low_i2c.h>
71208 #include <asm/prom.h>
71209 +#include <asm/local.h>
71210
71211 /* PCM3052 register definitions */
71212
71213 diff -urNp linux-2.6.32.41/sound/drivers/mts64.c linux-2.6.32.41/sound/drivers/mts64.c
71214 --- linux-2.6.32.41/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71215 +++ linux-2.6.32.41/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71216 @@ -27,6 +27,7 @@
71217 #include <sound/initval.h>
71218 #include <sound/rawmidi.h>
71219 #include <sound/control.h>
71220 +#include <asm/local.h>
71221
71222 #define CARD_NAME "Miditerminal 4140"
71223 #define DRIVER_NAME "MTS64"
71224 @@ -65,7 +66,7 @@ struct mts64 {
71225 struct pardevice *pardev;
71226 int pardev_claimed;
71227
71228 - int open_count;
71229 + local_t open_count;
71230 int current_midi_output_port;
71231 int current_midi_input_port;
71232 u8 mode[MTS64_NUM_INPUT_PORTS];
71233 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71234 {
71235 struct mts64 *mts = substream->rmidi->private_data;
71236
71237 - if (mts->open_count == 0) {
71238 + if (local_read(&mts->open_count) == 0) {
71239 /* We don't need a spinlock here, because this is just called
71240 if the device has not been opened before.
71241 So there aren't any IRQs from the device */
71242 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71243
71244 msleep(50);
71245 }
71246 - ++(mts->open_count);
71247 + local_inc(&mts->open_count);
71248
71249 return 0;
71250 }
71251 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71252 struct mts64 *mts = substream->rmidi->private_data;
71253 unsigned long flags;
71254
71255 - --(mts->open_count);
71256 - if (mts->open_count == 0) {
71257 + if (local_dec_return(&mts->open_count) == 0) {
71258 /* We need the spinlock_irqsave here because we can still
71259 have IRQs at this point */
71260 spin_lock_irqsave(&mts->lock, flags);
71261 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71262
71263 msleep(500);
71264
71265 - } else if (mts->open_count < 0)
71266 - mts->open_count = 0;
71267 + } else if (local_read(&mts->open_count) < 0)
71268 + local_set(&mts->open_count, 0);
71269
71270 return 0;
71271 }
71272 diff -urNp linux-2.6.32.41/sound/drivers/portman2x4.c linux-2.6.32.41/sound/drivers/portman2x4.c
71273 --- linux-2.6.32.41/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71274 +++ linux-2.6.32.41/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71275 @@ -46,6 +46,7 @@
71276 #include <sound/initval.h>
71277 #include <sound/rawmidi.h>
71278 #include <sound/control.h>
71279 +#include <asm/local.h>
71280
71281 #define CARD_NAME "Portman 2x4"
71282 #define DRIVER_NAME "portman"
71283 @@ -83,7 +84,7 @@ struct portman {
71284 struct pardevice *pardev;
71285 int pardev_claimed;
71286
71287 - int open_count;
71288 + local_t open_count;
71289 int mode[PORTMAN_NUM_INPUT_PORTS];
71290 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71291 };
71292 diff -urNp linux-2.6.32.41/sound/oss/sb_audio.c linux-2.6.32.41/sound/oss/sb_audio.c
71293 --- linux-2.6.32.41/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71294 +++ linux-2.6.32.41/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71295 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71296 buf16 = (signed short *)(localbuf + localoffs);
71297 while (c)
71298 {
71299 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71300 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71301 if (copy_from_user(lbuf8,
71302 userbuf+useroffs + p,
71303 locallen))
71304 diff -urNp linux-2.6.32.41/sound/oss/swarm_cs4297a.c linux-2.6.32.41/sound/oss/swarm_cs4297a.c
71305 --- linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71306 +++ linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71307 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71308 {
71309 struct cs4297a_state *s;
71310 u32 pwr, id;
71311 - mm_segment_t fs;
71312 int rval;
71313 #ifndef CONFIG_BCM_CS4297A_CSWARM
71314 u64 cfg;
71315 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71316 if (!rval) {
71317 char *sb1250_duart_present;
71318
71319 +#if 0
71320 + mm_segment_t fs;
71321 fs = get_fs();
71322 set_fs(KERNEL_DS);
71323 -#if 0
71324 val = SOUND_MASK_LINE;
71325 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71326 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71327 val = initvol[i].vol;
71328 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71329 }
71330 + set_fs(fs);
71331 // cs4297a_write_ac97(s, 0x18, 0x0808);
71332 #else
71333 // cs4297a_write_ac97(s, 0x5e, 0x180);
71334 cs4297a_write_ac97(s, 0x02, 0x0808);
71335 cs4297a_write_ac97(s, 0x18, 0x0808);
71336 #endif
71337 - set_fs(fs);
71338
71339 list_add(&s->list, &cs4297a_devs);
71340
71341 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_codec.c linux-2.6.32.41/sound/pci/ac97/ac97_codec.c
71342 --- linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71343 +++ linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71344 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71345 }
71346
71347 /* build_ops to do nothing */
71348 -static struct snd_ac97_build_ops null_build_ops;
71349 +static const struct snd_ac97_build_ops null_build_ops;
71350
71351 #ifdef CONFIG_SND_AC97_POWER_SAVE
71352 static void do_update_power(struct work_struct *work)
71353 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_patch.c linux-2.6.32.41/sound/pci/ac97/ac97_patch.c
71354 --- linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71355 +++ linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71356 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71357 return 0;
71358 }
71359
71360 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71361 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71362 .build_spdif = patch_yamaha_ymf743_build_spdif,
71363 .build_3d = patch_yamaha_ymf7x3_3d,
71364 };
71365 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71366 return 0;
71367 }
71368
71369 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71370 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71371 .build_3d = patch_yamaha_ymf7x3_3d,
71372 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71373 };
71374 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71375 return 0;
71376 }
71377
71378 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71379 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71380 .build_specific = patch_wolfson_wm9703_specific,
71381 };
71382
71383 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71384 return 0;
71385 }
71386
71387 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71388 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71389 .build_specific = patch_wolfson_wm9704_specific,
71390 };
71391
71392 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71393 return 0;
71394 }
71395
71396 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71397 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71398 .build_specific = patch_wolfson_wm9705_specific,
71399 };
71400
71401 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71402 return 0;
71403 }
71404
71405 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71406 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71407 .build_specific = patch_wolfson_wm9711_specific,
71408 };
71409
71410 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71411 }
71412 #endif
71413
71414 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71415 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71416 .build_specific = patch_wolfson_wm9713_specific,
71417 .build_3d = patch_wolfson_wm9713_3d,
71418 #ifdef CONFIG_PM
71419 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71420 return 0;
71421 }
71422
71423 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71424 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71425 .build_3d = patch_sigmatel_stac9700_3d,
71426 .build_specific = patch_sigmatel_stac97xx_specific
71427 };
71428 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71429 return patch_sigmatel_stac97xx_specific(ac97);
71430 }
71431
71432 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71433 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71434 .build_3d = patch_sigmatel_stac9708_3d,
71435 .build_specific = patch_sigmatel_stac9708_specific
71436 };
71437 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71438 return 0;
71439 }
71440
71441 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71442 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71443 .build_3d = patch_sigmatel_stac9700_3d,
71444 .build_specific = patch_sigmatel_stac9758_specific
71445 };
71446 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71447 return 0;
71448 }
71449
71450 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71451 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71452 .build_spdif = patch_cirrus_build_spdif
71453 };
71454
71455 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71456 return 0;
71457 }
71458
71459 -static struct snd_ac97_build_ops patch_conexant_ops = {
71460 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71461 .build_spdif = patch_conexant_build_spdif
71462 };
71463
71464 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71465 }
71466 }
71467
71468 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71469 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71470 #ifdef CONFIG_PM
71471 .resume = ad18xx_resume
71472 #endif
71473 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71474 return 0;
71475 }
71476
71477 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71478 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71479 .build_specific = &patch_ad1885_specific,
71480 #ifdef CONFIG_PM
71481 .resume = ad18xx_resume
71482 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71483 return 0;
71484 }
71485
71486 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71487 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71488 .build_specific = &patch_ad1886_specific,
71489 #ifdef CONFIG_PM
71490 .resume = ad18xx_resume
71491 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71492 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71493 }
71494
71495 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71496 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71497 .build_post_spdif = patch_ad198x_post_spdif,
71498 .build_specific = patch_ad1981a_specific,
71499 #ifdef CONFIG_PM
71500 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71501 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71502 }
71503
71504 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71505 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71506 .build_post_spdif = patch_ad198x_post_spdif,
71507 .build_specific = patch_ad1981b_specific,
71508 #ifdef CONFIG_PM
71509 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71510 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71511 }
71512
71513 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71514 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71515 .build_post_spdif = patch_ad198x_post_spdif,
71516 .build_specific = patch_ad1888_specific,
71517 #ifdef CONFIG_PM
71518 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71519 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71520 }
71521
71522 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71523 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71524 .build_post_spdif = patch_ad198x_post_spdif,
71525 .build_specific = patch_ad1980_specific,
71526 #ifdef CONFIG_PM
71527 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71528 ARRAY_SIZE(snd_ac97_ad1985_controls));
71529 }
71530
71531 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71532 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71533 .build_post_spdif = patch_ad198x_post_spdif,
71534 .build_specific = patch_ad1985_specific,
71535 #ifdef CONFIG_PM
71536 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71537 ARRAY_SIZE(snd_ac97_ad1985_controls));
71538 }
71539
71540 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71541 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71542 .build_post_spdif = patch_ad198x_post_spdif,
71543 .build_specific = patch_ad1986_specific,
71544 #ifdef CONFIG_PM
71545 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71546 return 0;
71547 }
71548
71549 -static struct snd_ac97_build_ops patch_alc650_ops = {
71550 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71551 .build_specific = patch_alc650_specific,
71552 .update_jacks = alc650_update_jacks
71553 };
71554 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71555 return 0;
71556 }
71557
71558 -static struct snd_ac97_build_ops patch_alc655_ops = {
71559 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71560 .build_specific = patch_alc655_specific,
71561 .update_jacks = alc655_update_jacks
71562 };
71563 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71564 return 0;
71565 }
71566
71567 -static struct snd_ac97_build_ops patch_alc850_ops = {
71568 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71569 .build_specific = patch_alc850_specific,
71570 .update_jacks = alc850_update_jacks
71571 };
71572 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71573 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71574 }
71575
71576 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71577 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71578 .build_specific = patch_cm9738_specific,
71579 .update_jacks = cm9738_update_jacks
71580 };
71581 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71582 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71583 }
71584
71585 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71586 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71587 .build_specific = patch_cm9739_specific,
71588 .build_post_spdif = patch_cm9739_post_spdif,
71589 .update_jacks = cm9739_update_jacks
71590 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71591 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71592 }
71593
71594 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71595 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71596 .build_specific = patch_cm9761_specific,
71597 .build_post_spdif = patch_cm9761_post_spdif,
71598 .update_jacks = cm9761_update_jacks
71599 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71600 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71601 }
71602
71603 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71604 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71605 .build_specific = patch_cm9780_specific,
71606 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71607 };
71608 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71609 return 0;
71610 }
71611
71612 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71613 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71614 .build_specific = patch_vt1616_specific
71615 };
71616
71617 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71618 return 0;
71619 }
71620
71621 -static struct snd_ac97_build_ops patch_it2646_ops = {
71622 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71623 .build_specific = patch_it2646_specific,
71624 .update_jacks = it2646_update_jacks
71625 };
71626 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71627 return 0;
71628 }
71629
71630 -static struct snd_ac97_build_ops patch_si3036_ops = {
71631 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71632 .build_specific = patch_si3036_specific,
71633 };
71634
71635 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71636 return 0;
71637 }
71638
71639 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71640 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71641 .build_specific = patch_ucb1400_specific,
71642 };
71643
71644 diff -urNp linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c
71645 --- linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71646 +++ linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71647 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71648 cp_ready);
71649
71650 /* TODO */
71651 - if (cp_state)
71652 - ;
71653 - if (cp_ready)
71654 - ;
71655 + if (cp_state) {
71656 + }
71657 + if (cp_ready) {
71658 + }
71659 }
71660
71661
71662 diff -urNp linux-2.6.32.41/sound/pci/intel8x0m.c linux-2.6.32.41/sound/pci/intel8x0m.c
71663 --- linux-2.6.32.41/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71664 +++ linux-2.6.32.41/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71665 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71666 { 0x5455, "ALi M5455" },
71667 { 0x746d, "AMD AMD8111" },
71668 #endif
71669 - { 0 },
71670 + { 0, },
71671 };
71672
71673 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
71674 diff -urNp linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c
71675 --- linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
71676 +++ linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
71677 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
71678 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
71679 break;
71680 }
71681 - if (atomic_read(&chip->interrupt_sleep_count)) {
71682 - atomic_set(&chip->interrupt_sleep_count, 0);
71683 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71684 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71685 wake_up(&chip->interrupt_sleep);
71686 }
71687 __end:
71688 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
71689 continue;
71690 init_waitqueue_entry(&wait, current);
71691 add_wait_queue(&chip->interrupt_sleep, &wait);
71692 - atomic_inc(&chip->interrupt_sleep_count);
71693 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
71694 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
71695 remove_wait_queue(&chip->interrupt_sleep, &wait);
71696 }
71697 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
71698 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
71699 spin_unlock(&chip->reg_lock);
71700
71701 - if (atomic_read(&chip->interrupt_sleep_count)) {
71702 - atomic_set(&chip->interrupt_sleep_count, 0);
71703 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71704 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71705 wake_up(&chip->interrupt_sleep);
71706 }
71707 }
71708 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
71709 spin_lock_init(&chip->reg_lock);
71710 spin_lock_init(&chip->voice_lock);
71711 init_waitqueue_head(&chip->interrupt_sleep);
71712 - atomic_set(&chip->interrupt_sleep_count, 0);
71713 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71714 chip->card = card;
71715 chip->pci = pci;
71716 chip->irq = -1;
71717 diff -urNp linux-2.6.32.41/tools/gcc/Makefile linux-2.6.32.41/tools/gcc/Makefile
71718 --- linux-2.6.32.41/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
71719 +++ linux-2.6.32.41/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
71720 @@ -0,0 +1,11 @@
71721 +#CC := gcc
71722 +#PLUGIN_SOURCE_FILES := pax_plugin.c
71723 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
71724 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
71725 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
71726 +
71727 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
71728 +
71729 +hostlibs-y := pax_plugin.so
71730 +always := $(hostlibs-y)
71731 +pax_plugin-objs := pax_plugin.o
71732 diff -urNp linux-2.6.32.41/tools/gcc/pax_plugin.c linux-2.6.32.41/tools/gcc/pax_plugin.c
71733 --- linux-2.6.32.41/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
71734 +++ linux-2.6.32.41/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
71735 @@ -0,0 +1,242 @@
71736 +/*
71737 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
71738 + * Licensed under the GPL v2
71739 + *
71740 + * Note: the choice of the license means that the compilation process is
71741 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
71742 + * but for the kernel it doesn't matter since it doesn't link against
71743 + * any of the gcc libraries
71744 + *
71745 + * gcc plugin to help implement various PaX features
71746 + *
71747 + * - track lowest stack pointer
71748 + *
71749 + * TODO:
71750 + * - initialize all local variables
71751 + *
71752 + * BUGS:
71753 + */
71754 +#include "gcc-plugin.h"
71755 +#include "plugin-version.h"
71756 +#include "config.h"
71757 +#include "system.h"
71758 +#include "coretypes.h"
71759 +#include "tm.h"
71760 +#include "toplev.h"
71761 +#include "basic-block.h"
71762 +#include "gimple.h"
71763 +//#include "expr.h" where are you...
71764 +#include "diagnostic.h"
71765 +#include "rtl.h"
71766 +#include "emit-rtl.h"
71767 +#include "function.h"
71768 +#include "tree.h"
71769 +#include "tree-pass.h"
71770 +#include "intl.h"
71771 +
71772 +int plugin_is_GPL_compatible;
71773 +
71774 +static int track_frame_size = -1;
71775 +static const char track_function[] = "pax_track_stack";
71776 +static bool init_locals;
71777 +
71778 +static struct plugin_info pax_plugin_info = {
71779 + .version = "201106030000",
71780 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
71781 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
71782 +};
71783 +
71784 +static bool gate_pax_track_stack(void);
71785 +static unsigned int execute_pax_tree_instrument(void);
71786 +static unsigned int execute_pax_final(void);
71787 +
71788 +static struct gimple_opt_pass pax_tree_instrument_pass = {
71789 + .pass = {
71790 + .type = GIMPLE_PASS,
71791 + .name = "pax_tree_instrument",
71792 + .gate = gate_pax_track_stack,
71793 + .execute = execute_pax_tree_instrument,
71794 + .sub = NULL,
71795 + .next = NULL,
71796 + .static_pass_number = 0,
71797 + .tv_id = TV_NONE,
71798 + .properties_required = PROP_gimple_leh | PROP_cfg,
71799 + .properties_provided = 0,
71800 + .properties_destroyed = 0,
71801 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
71802 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
71803 + }
71804 +};
71805 +
71806 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
71807 + .pass = {
71808 + .type = RTL_PASS,
71809 + .name = "pax_final",
71810 + .gate = gate_pax_track_stack,
71811 + .execute = execute_pax_final,
71812 + .sub = NULL,
71813 + .next = NULL,
71814 + .static_pass_number = 0,
71815 + .tv_id = TV_NONE,
71816 + .properties_required = 0,
71817 + .properties_provided = 0,
71818 + .properties_destroyed = 0,
71819 + .todo_flags_start = 0,
71820 + .todo_flags_finish = 0
71821 + }
71822 +};
71823 +
71824 +static bool gate_pax_track_stack(void)
71825 +{
71826 + return track_frame_size >= 0;
71827 +}
71828 +
71829 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
71830 +{
71831 + gimple call;
71832 + tree decl, type;
71833 +
71834 + // insert call to void pax_track_stack(void)
71835 + type = build_function_type_list(void_type_node, NULL_TREE);
71836 + decl = build_fn_decl(track_function, type);
71837 + DECL_ASSEMBLER_NAME(decl); // for LTO
71838 + call = gimple_build_call(decl, 0);
71839 + if (before)
71840 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
71841 + else
71842 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
71843 +}
71844 +
71845 +static unsigned int execute_pax_tree_instrument(void)
71846 +{
71847 + basic_block bb;
71848 + gimple_stmt_iterator gsi;
71849 +
71850 + // 1. loop through BBs and GIMPLE statements
71851 + FOR_EACH_BB(bb) {
71852 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
71853 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
71854 + tree decl;
71855 + gimple stmt = gsi_stmt(gsi);
71856 +
71857 + if (!is_gimple_call(stmt))
71858 + continue;
71859 + decl = gimple_call_fndecl(stmt);
71860 + if (!decl)
71861 + continue;
71862 + if (TREE_CODE(decl) != FUNCTION_DECL)
71863 + continue;
71864 + if (!DECL_BUILT_IN(decl))
71865 + continue;
71866 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
71867 + continue;
71868 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
71869 + continue;
71870 +
71871 + // 2. insert track call after each __builtin_alloca call
71872 + pax_add_instrumentation(&gsi, false);
71873 +// print_node(stderr, "pax", decl, 4);
71874 + }
71875 + }
71876 +
71877 + // 3. insert track call at the beginning
71878 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
71879 + gsi = gsi_start_bb(bb);
71880 + pax_add_instrumentation(&gsi, true);
71881 +
71882 + return 0;
71883 +}
71884 +
71885 +static unsigned int execute_pax_final(void)
71886 +{
71887 + rtx insn;
71888 +
71889 + if (cfun->calls_alloca)
71890 + return 0;
71891 +
71892 + // 1. find pax_track_stack calls
71893 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
71894 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
71895 + rtx body;
71896 +
71897 + if (!CALL_P(insn))
71898 + continue;
71899 + body = PATTERN(insn);
71900 + if (GET_CODE(body) != CALL)
71901 + continue;
71902 + body = XEXP(body, 0);
71903 + if (GET_CODE(body) != MEM)
71904 + continue;
71905 + body = XEXP(body, 0);
71906 + if (GET_CODE(body) != SYMBOL_REF)
71907 + continue;
71908 + if (strcmp(XSTR(body, 0), track_function))
71909 + continue;
71910 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71911 + // 2. delete call if function frame is not big enough
71912 + if (get_frame_size() >= track_frame_size)
71913 + continue;
71914 + delete_insn_and_edges(insn);
71915 + }
71916 +
71917 +// print_simple_rtl(stderr, get_insns());
71918 +// print_rtl(stderr, get_insns());
71919 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71920 +
71921 + return 0;
71922 +}
71923 +
71924 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
71925 +{
71926 + const char * const plugin_name = plugin_info->base_name;
71927 + const int argc = plugin_info->argc;
71928 + const struct plugin_argument * const argv = plugin_info->argv;
71929 + int i;
71930 + struct register_pass_info pax_tree_instrument_pass_info = {
71931 + .pass = &pax_tree_instrument_pass.pass,
71932 +// .reference_pass_name = "tree_profile",
71933 + .reference_pass_name = "optimized",
71934 + .ref_pass_instance_number = 0,
71935 + .pos_op = PASS_POS_INSERT_AFTER
71936 + };
71937 + struct register_pass_info pax_final_pass_info = {
71938 + .pass = &pax_final_rtl_opt_pass.pass,
71939 + .reference_pass_name = "final",
71940 + .ref_pass_instance_number = 0,
71941 + .pos_op = PASS_POS_INSERT_BEFORE
71942 + };
71943 +
71944 + if (!plugin_default_version_check(version, &gcc_version)) {
71945 + error(G_("incompatible gcc/plugin versions"));
71946 + return 1;
71947 + }
71948 +
71949 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
71950 +
71951 + for (i = 0; i < argc; ++i) {
71952 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
71953 + if (!argv[i].value) {
71954 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71955 + continue;
71956 + }
71957 + track_frame_size = atoi(argv[i].value);
71958 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
71959 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71960 + continue;
71961 + }
71962 + if (!strcmp(argv[i].key, "initialize-locals")) {
71963 + if (argv[i].value) {
71964 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71965 + continue;
71966 + }
71967 + init_locals = true;
71968 + continue;
71969 + }
71970 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71971 + }
71972 +
71973 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
71974 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
71975 +
71976 + return 0;
71977 +}
71978 Binary files linux-2.6.32.41/tools/gcc/pax_plugin.so and linux-2.6.32.41/tools/gcc/pax_plugin.so differ
71979 diff -urNp linux-2.6.32.41/usr/gen_init_cpio.c linux-2.6.32.41/usr/gen_init_cpio.c
71980 --- linux-2.6.32.41/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
71981 +++ linux-2.6.32.41/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
71982 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
71983 int retval;
71984 int rc = -1;
71985 int namesize;
71986 - int i;
71987 + unsigned int i;
71988
71989 mode |= S_IFREG;
71990
71991 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
71992 *env_var = *expanded = '\0';
71993 strncat(env_var, start + 2, end - start - 2);
71994 strncat(expanded, new_location, start - new_location);
71995 - strncat(expanded, getenv(env_var), PATH_MAX);
71996 - strncat(expanded, end + 1, PATH_MAX);
71997 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
71998 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
71999 strncpy(new_location, expanded, PATH_MAX);
72000 + new_location[PATH_MAX] = 0;
72001 } else
72002 break;
72003 }
72004 diff -urNp linux-2.6.32.41/virt/kvm/kvm_main.c linux-2.6.32.41/virt/kvm/kvm_main.c
72005 --- linux-2.6.32.41/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72006 +++ linux-2.6.32.41/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72007 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72008 return 0;
72009 }
72010
72011 +/* cannot be const */
72012 static struct file_operations kvm_vcpu_fops = {
72013 .release = kvm_vcpu_release,
72014 .unlocked_ioctl = kvm_vcpu_ioctl,
72015 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72016 return 0;
72017 }
72018
72019 +/* cannot be const */
72020 static struct file_operations kvm_vm_fops = {
72021 .release = kvm_vm_release,
72022 .unlocked_ioctl = kvm_vm_ioctl,
72023 @@ -2431,6 +2433,7 @@ out:
72024 return r;
72025 }
72026
72027 +/* cannot be const */
72028 static struct file_operations kvm_chardev_ops = {
72029 .unlocked_ioctl = kvm_dev_ioctl,
72030 .compat_ioctl = kvm_dev_ioctl,
72031 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72032 if (kvm_rebooting)
72033 /* spin while reset goes on */
72034 while (true)
72035 - ;
72036 + cpu_relax();
72037 /* Fault while not rebooting. We want the trace. */
72038 BUG();
72039 }
72040 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72041 kvm_arch_vcpu_put(vcpu);
72042 }
72043
72044 -int kvm_init(void *opaque, unsigned int vcpu_size,
72045 +int kvm_init(const void *opaque, unsigned int vcpu_size,
72046 struct module *module)
72047 {
72048 int r;
72049 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
72050 /* A kmem cache lets us meet the alignment requirements of fx_save. */
72051 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72052 __alignof__(struct kvm_vcpu),
72053 - 0, NULL);
72054 + SLAB_USERCOPY, NULL);
72055 if (!kvm_vcpu_cache) {
72056 r = -ENOMEM;
72057 goto out_free_5;