1 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/elf.h linux-2.6.32.41/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/pgtable.h linux-2.6.32.41/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.41/arch/alpha/kernel/module.c linux-2.6.32.41/arch/alpha/kernel/module.c
40 --- linux-2.6.32.41/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.41/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.41/arch/alpha/kernel/osf_sys.c linux-2.6.32.41/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-04-17 15:56:45.000000000 -0400
54 @@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1212,8 +1216,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.41/arch/alpha/mm/fault.c linux-2.6.32.41/arch/alpha/mm/fault.c
86 --- linux-2.6.32.41/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.41/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.41/arch/arm/include/asm/elf.h linux-2.6.32.41/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.41/arch/arm/include/asm/kmap_types.h linux-2.6.32.41/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.41/arch/arm/include/asm/uaccess.h linux-2.6.32.41/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
277 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
278
279 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
280 {
281 + if ((long)n < 0)
282 + return n;
283 +
284 if (access_ok(VERIFY_READ, from, n))
285 n = __copy_from_user(to, from, n);
286 else /* security hole - plug it */
287 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
288
289 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
290 {
291 + if ((long)n < 0)
292 + return n;
293 +
294 if (access_ok(VERIFY_WRITE, to, n))
295 n = __copy_to_user(to, from, n);
296 return n;
297 diff -urNp linux-2.6.32.41/arch/arm/kernel/kgdb.c linux-2.6.32.41/arch/arm/kernel/kgdb.c
298 --- linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
299 +++ linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
300 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
301 * and we handle the normal undef case within the do_undefinstr
302 * handler.
303 */
304 -struct kgdb_arch arch_kgdb_ops = {
305 +const struct kgdb_arch arch_kgdb_ops = {
306 #ifndef __ARMEB__
307 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
308 #else /* ! __ARMEB__ */
309 diff -urNp linux-2.6.32.41/arch/arm/mach-at91/pm.c linux-2.6.32.41/arch/arm/mach-at91/pm.c
310 --- linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
311 +++ linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
312 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
313 }
314
315
316 -static struct platform_suspend_ops at91_pm_ops ={
317 +static const struct platform_suspend_ops at91_pm_ops ={
318 .valid = at91_pm_valid_state,
319 .begin = at91_pm_begin,
320 .enter = at91_pm_enter,
321 diff -urNp linux-2.6.32.41/arch/arm/mach-omap1/pm.c linux-2.6.32.41/arch/arm/mach-omap1/pm.c
322 --- linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
323 +++ linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
324 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
325
326
327
328 -static struct platform_suspend_ops omap_pm_ops ={
329 +static const struct platform_suspend_ops omap_pm_ops ={
330 .prepare = omap_pm_prepare,
331 .enter = omap_pm_enter,
332 .finish = omap_pm_finish,
333 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c
334 --- linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
335 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
336 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
337 enable_hlt();
338 }
339
340 -static struct platform_suspend_ops omap_pm_ops = {
341 +static const struct platform_suspend_ops omap_pm_ops = {
342 .prepare = omap2_pm_prepare,
343 .enter = omap2_pm_enter,
344 .finish = omap2_pm_finish,
345 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c
346 --- linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
347 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
348 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
349 return;
350 }
351
352 -static struct platform_suspend_ops omap_pm_ops = {
353 +static const struct platform_suspend_ops omap_pm_ops = {
354 .begin = omap3_pm_begin,
355 .end = omap3_pm_end,
356 .prepare = omap3_pm_prepare,
357 diff -urNp linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c
358 --- linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
359 +++ linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
360 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
361 (state == PM_SUSPEND_MEM);
362 }
363
364 -static struct platform_suspend_ops pnx4008_pm_ops = {
365 +static const struct platform_suspend_ops pnx4008_pm_ops = {
366 .enter = pnx4008_pm_enter,
367 .valid = pnx4008_pm_valid,
368 };
369 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/pm.c linux-2.6.32.41/arch/arm/mach-pxa/pm.c
370 --- linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
371 +++ linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
372 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
373 pxa_cpu_pm_fns->finish();
374 }
375
376 -static struct platform_suspend_ops pxa_pm_ops = {
377 +static const struct platform_suspend_ops pxa_pm_ops = {
378 .valid = pxa_pm_valid,
379 .enter = pxa_pm_enter,
380 .prepare = pxa_pm_prepare,
381 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c
382 --- linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
383 +++ linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
384 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
385 }
386
387 #ifdef CONFIG_PM
388 -static struct platform_suspend_ops sharpsl_pm_ops = {
389 +static const struct platform_suspend_ops sharpsl_pm_ops = {
390 .prepare = pxa_pm_prepare,
391 .finish = pxa_pm_finish,
392 .enter = corgi_pxa_pm_enter,
393 diff -urNp linux-2.6.32.41/arch/arm/mach-sa1100/pm.c linux-2.6.32.41/arch/arm/mach-sa1100/pm.c
394 --- linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
395 +++ linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
396 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
397 return virt_to_phys(sp);
398 }
399
400 -static struct platform_suspend_ops sa11x0_pm_ops = {
401 +static const struct platform_suspend_ops sa11x0_pm_ops = {
402 .enter = sa11x0_pm_enter,
403 .valid = suspend_valid_only_mem,
404 };
405 diff -urNp linux-2.6.32.41/arch/arm/mm/fault.c linux-2.6.32.41/arch/arm/mm/fault.c
406 --- linux-2.6.32.41/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.41/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
408 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
409 }
410 #endif
411
412 +#ifdef CONFIG_PAX_PAGEEXEC
413 + if (fsr & FSR_LNX_PF) {
414 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
415 + do_group_exit(SIGKILL);
416 + }
417 +#endif
418 +
419 tsk->thread.address = addr;
420 tsk->thread.error_code = fsr;
421 tsk->thread.trap_no = 14;
422 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
423 }
424 #endif /* CONFIG_MMU */
425
426 +#ifdef CONFIG_PAX_PAGEEXEC
427 +void pax_report_insns(void *pc, void *sp)
428 +{
429 + long i;
430 +
431 + printk(KERN_ERR "PAX: bytes at PC: ");
432 + for (i = 0; i < 20; i++) {
433 + unsigned char c;
434 + if (get_user(c, (__force unsigned char __user *)pc+i))
435 + printk(KERN_CONT "?? ");
436 + else
437 + printk(KERN_CONT "%02x ", c);
438 + }
439 + printk("\n");
440 +
441 + printk(KERN_ERR "PAX: bytes at SP-4: ");
442 + for (i = -1; i < 20; i++) {
443 + unsigned long c;
444 + if (get_user(c, (__force unsigned long __user *)sp+i))
445 + printk(KERN_CONT "???????? ");
446 + else
447 + printk(KERN_CONT "%08lx ", c);
448 + }
449 + printk("\n");
450 +}
451 +#endif
452 +
453 /*
454 * First Level Translation Fault Handler
455 *
456 diff -urNp linux-2.6.32.41/arch/arm/mm/mmap.c linux-2.6.32.41/arch/arm/mm/mmap.c
457 --- linux-2.6.32.41/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
458 +++ linux-2.6.32.41/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
459 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
460 if (len > TASK_SIZE)
461 return -ENOMEM;
462
463 +#ifdef CONFIG_PAX_RANDMMAP
464 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
465 +#endif
466 +
467 if (addr) {
468 if (do_align)
469 addr = COLOUR_ALIGN(addr, pgoff);
470 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
471 addr = PAGE_ALIGN(addr);
472
473 vma = find_vma(mm, addr);
474 - if (TASK_SIZE - len >= addr &&
475 - (!vma || addr + len <= vma->vm_start))
476 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
477 return addr;
478 }
479 if (len > mm->cached_hole_size) {
480 - start_addr = addr = mm->free_area_cache;
481 + start_addr = addr = mm->free_area_cache;
482 } else {
483 - start_addr = addr = TASK_UNMAPPED_BASE;
484 - mm->cached_hole_size = 0;
485 + start_addr = addr = mm->mmap_base;
486 + mm->cached_hole_size = 0;
487 }
488
489 full_search:
490 @@ -94,14 +97,14 @@ full_search:
491 * Start a new search - just in case we missed
492 * some holes.
493 */
494 - if (start_addr != TASK_UNMAPPED_BASE) {
495 - start_addr = addr = TASK_UNMAPPED_BASE;
496 + if (start_addr != mm->mmap_base) {
497 + start_addr = addr = mm->mmap_base;
498 mm->cached_hole_size = 0;
499 goto full_search;
500 }
501 return -ENOMEM;
502 }
503 - if (!vma || addr + len <= vma->vm_start) {
504 + if (check_heap_stack_gap(vma, addr, len)) {
505 /*
506 * Remember the place where we stopped the search:
507 */
508 diff -urNp linux-2.6.32.41/arch/arm/plat-s3c/pm.c linux-2.6.32.41/arch/arm/plat-s3c/pm.c
509 --- linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
510 +++ linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
511 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
512 s3c_pm_check_cleanup();
513 }
514
515 -static struct platform_suspend_ops s3c_pm_ops = {
516 +static const struct platform_suspend_ops s3c_pm_ops = {
517 .enter = s3c_pm_enter,
518 .prepare = s3c_pm_prepare,
519 .finish = s3c_pm_finish,
520 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/elf.h linux-2.6.32.41/arch/avr32/include/asm/elf.h
521 --- linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
522 +++ linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
523 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
524 the loader. We need to make sure that it is out of the way of the program
525 that it will "exec", and that there is sufficient room for the brk. */
526
527 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
528 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
529
530 +#ifdef CONFIG_PAX_ASLR
531 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
532 +
533 +#define PAX_DELTA_MMAP_LEN 15
534 +#define PAX_DELTA_STACK_LEN 15
535 +#endif
536
537 /* This yields a mask that user programs can use to figure out what
538 instruction set this CPU supports. This could be done in user space,
539 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h
540 --- linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
541 +++ linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
542 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
543 D(11) KM_IRQ1,
544 D(12) KM_SOFTIRQ0,
545 D(13) KM_SOFTIRQ1,
546 -D(14) KM_TYPE_NR
547 +D(14) KM_CLEARPAGE,
548 +D(15) KM_TYPE_NR
549 };
550
551 #undef D
552 diff -urNp linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c
553 --- linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
554 +++ linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
555 @@ -176,7 +176,7 @@ out:
556 return 0;
557 }
558
559 -static struct platform_suspend_ops avr32_pm_ops = {
560 +static const struct platform_suspend_ops avr32_pm_ops = {
561 .valid = avr32_pm_valid_state,
562 .enter = avr32_pm_enter,
563 };
564 diff -urNp linux-2.6.32.41/arch/avr32/mm/fault.c linux-2.6.32.41/arch/avr32/mm/fault.c
565 --- linux-2.6.32.41/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
566 +++ linux-2.6.32.41/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
567 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
568
569 int exception_trace = 1;
570
571 +#ifdef CONFIG_PAX_PAGEEXEC
572 +void pax_report_insns(void *pc, void *sp)
573 +{
574 + unsigned long i;
575 +
576 + printk(KERN_ERR "PAX: bytes at PC: ");
577 + for (i = 0; i < 20; i++) {
578 + unsigned char c;
579 + if (get_user(c, (unsigned char *)pc+i))
580 + printk(KERN_CONT "???????? ");
581 + else
582 + printk(KERN_CONT "%02x ", c);
583 + }
584 + printk("\n");
585 +}
586 +#endif
587 +
588 /*
589 * This routine handles page faults. It determines the address and the
590 * problem, and then passes it off to one of the appropriate routines.
591 @@ -157,6 +174,16 @@ bad_area:
592 up_read(&mm->mmap_sem);
593
594 if (user_mode(regs)) {
595 +
596 +#ifdef CONFIG_PAX_PAGEEXEC
597 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
598 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
599 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
600 + do_group_exit(SIGKILL);
601 + }
602 + }
603 +#endif
604 +
605 if (exception_trace && printk_ratelimit())
606 printk("%s%s[%d]: segfault at %08lx pc %08lx "
607 "sp %08lx ecr %lu\n",
608 diff -urNp linux-2.6.32.41/arch/blackfin/kernel/kgdb.c linux-2.6.32.41/arch/blackfin/kernel/kgdb.c
609 --- linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
610 +++ linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
611 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
612 return -1; /* this means that we do not want to exit from the handler */
613 }
614
615 -struct kgdb_arch arch_kgdb_ops = {
616 +const struct kgdb_arch arch_kgdb_ops = {
617 .gdb_bpt_instr = {0xa1},
618 #ifdef CONFIG_SMP
619 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
620 diff -urNp linux-2.6.32.41/arch/blackfin/mach-common/pm.c linux-2.6.32.41/arch/blackfin/mach-common/pm.c
621 --- linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
622 +++ linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
623 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
624 return 0;
625 }
626
627 -struct platform_suspend_ops bfin_pm_ops = {
628 +const struct platform_suspend_ops bfin_pm_ops = {
629 .enter = bfin_pm_enter,
630 .valid = bfin_pm_valid,
631 };
632 diff -urNp linux-2.6.32.41/arch/frv/include/asm/kmap_types.h linux-2.6.32.41/arch/frv/include/asm/kmap_types.h
633 --- linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
634 +++ linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
635 @@ -23,6 +23,7 @@ enum km_type {
636 KM_IRQ1,
637 KM_SOFTIRQ0,
638 KM_SOFTIRQ1,
639 + KM_CLEARPAGE,
640 KM_TYPE_NR
641 };
642
643 diff -urNp linux-2.6.32.41/arch/frv/mm/elf-fdpic.c linux-2.6.32.41/arch/frv/mm/elf-fdpic.c
644 --- linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
645 +++ linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
646 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
647 if (addr) {
648 addr = PAGE_ALIGN(addr);
649 vma = find_vma(current->mm, addr);
650 - if (TASK_SIZE - len >= addr &&
651 - (!vma || addr + len <= vma->vm_start))
652 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
653 goto success;
654 }
655
656 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
657 for (; vma; vma = vma->vm_next) {
658 if (addr > limit)
659 break;
660 - if (addr + len <= vma->vm_start)
661 + if (check_heap_stack_gap(vma, addr, len))
662 goto success;
663 addr = vma->vm_end;
664 }
665 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
666 for (; vma; vma = vma->vm_next) {
667 if (addr > limit)
668 break;
669 - if (addr + len <= vma->vm_start)
670 + if (check_heap_stack_gap(vma, addr, len))
671 goto success;
672 addr = vma->vm_end;
673 }
674 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c
675 --- linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
676 +++ linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
677 @@ -17,7 +17,7 @@
678 #include <linux/swiotlb.h>
679 #include <asm/machvec.h>
680
681 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
682 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
683
684 /* swiotlb declarations & definitions: */
685 extern int swiotlb_late_init_with_default_size (size_t size);
686 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
687 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
688 }
689
690 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
691 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
692 {
693 if (use_swiotlb(dev))
694 return &swiotlb_dma_ops;
695 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c
696 --- linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
697 +++ linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
698 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
699 },
700 };
701
702 -extern struct dma_map_ops swiotlb_dma_ops;
703 +extern const struct dma_map_ops swiotlb_dma_ops;
704
705 static int __init
706 sba_init(void)
707 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
708
709 __setup("sbapagesize=",sba_page_override);
710
711 -struct dma_map_ops sba_dma_ops = {
712 +const struct dma_map_ops sba_dma_ops = {
713 .alloc_coherent = sba_alloc_coherent,
714 .free_coherent = sba_free_coherent,
715 .map_page = sba_map_page,
716 diff -urNp linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c
717 --- linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
718 +++ linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
719 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
720
721 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
722
723 +#ifdef CONFIG_PAX_ASLR
724 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
725 +
726 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
727 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
728 +#endif
729 +
730 /* Ugly but avoids duplication */
731 #include "../../../fs/binfmt_elf.c"
732
733 diff -urNp linux-2.6.32.41/arch/ia64/ia32/ia32priv.h linux-2.6.32.41/arch/ia64/ia32/ia32priv.h
734 --- linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
735 +++ linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
736 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
737 #define ELF_DATA ELFDATA2LSB
738 #define ELF_ARCH EM_386
739
740 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
741 +#ifdef CONFIG_PAX_RANDUSTACK
742 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
743 +#else
744 +#define __IA32_DELTA_STACK 0UL
745 +#endif
746 +
747 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
748 +
749 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
750 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
751
752 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h
753 --- linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
754 +++ linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
755 @@ -12,7 +12,7 @@
756
757 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
758
759 -extern struct dma_map_ops *dma_ops;
760 +extern const struct dma_map_ops *dma_ops;
761 extern struct ia64_machine_vector ia64_mv;
762 extern void set_iommu_machvec(void);
763
764 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
765 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
766 dma_addr_t *daddr, gfp_t gfp)
767 {
768 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
769 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
770 void *caddr;
771
772 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
773 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
774 static inline void dma_free_coherent(struct device *dev, size_t size,
775 void *caddr, dma_addr_t daddr)
776 {
777 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
778 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
779 debug_dma_free_coherent(dev, size, caddr, daddr);
780 ops->free_coherent(dev, size, caddr, daddr);
781 }
782 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
783
784 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
785 {
786 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
787 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
788 return ops->mapping_error(dev, daddr);
789 }
790
791 static inline int dma_supported(struct device *dev, u64 mask)
792 {
793 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
794 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
795 return ops->dma_supported(dev, mask);
796 }
797
798 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/elf.h linux-2.6.32.41/arch/ia64/include/asm/elf.h
799 --- linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
801 @@ -43,6 +43,13 @@
802 */
803 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
804
805 +#ifdef CONFIG_PAX_ASLR
806 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
807 +
808 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
809 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
810 +#endif
811 +
812 #define PT_IA_64_UNWIND 0x70000001
813
814 /* IA-64 relocations: */
815 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/machvec.h linux-2.6.32.41/arch/ia64/include/asm/machvec.h
816 --- linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
817 +++ linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
818 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
819 /* DMA-mapping interface: */
820 typedef void ia64_mv_dma_init (void);
821 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
822 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
823 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
824
825 /*
826 * WARNING: The legacy I/O space is _architected_. Platforms are
827 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
828 # endif /* CONFIG_IA64_GENERIC */
829
830 extern void swiotlb_dma_init(void);
831 -extern struct dma_map_ops *dma_get_ops(struct device *);
832 +extern const struct dma_map_ops *dma_get_ops(struct device *);
833
834 /*
835 * Define default versions so we can extend machvec for new platforms without having
836 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/pgtable.h linux-2.6.32.41/arch/ia64/include/asm/pgtable.h
837 --- linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
838 +++ linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
839 @@ -12,7 +12,7 @@
840 * David Mosberger-Tang <davidm@hpl.hp.com>
841 */
842
843 -
844 +#include <linux/const.h>
845 #include <asm/mman.h>
846 #include <asm/page.h>
847 #include <asm/processor.h>
848 @@ -143,6 +143,17 @@
849 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
850 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
851 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
852 +
853 +#ifdef CONFIG_PAX_PAGEEXEC
854 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
855 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
856 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
857 +#else
858 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
859 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
860 +# define PAGE_COPY_NOEXEC PAGE_COPY
861 +#endif
862 +
863 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
864 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
865 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
866 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/spinlock.h linux-2.6.32.41/arch/ia64/include/asm/spinlock.h
867 --- linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
868 +++ linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
869 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
870 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
871
872 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
873 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
874 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
875 }
876
877 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
878 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/uaccess.h linux-2.6.32.41/arch/ia64/include/asm/uaccess.h
879 --- linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
880 +++ linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
881 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
882 const void *__cu_from = (from); \
883 long __cu_len = (n); \
884 \
885 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
886 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
887 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
888 __cu_len; \
889 })
890 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
891 long __cu_len = (n); \
892 \
893 __chk_user_ptr(__cu_from); \
894 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
895 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
896 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
897 __cu_len; \
898 })
899 diff -urNp linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c
900 --- linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
901 +++ linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
902 @@ -3,7 +3,7 @@
903 /* Set this to 1 if there is a HW IOMMU in the system */
904 int iommu_detected __read_mostly;
905
906 -struct dma_map_ops *dma_ops;
907 +const struct dma_map_ops *dma_ops;
908 EXPORT_SYMBOL(dma_ops);
909
910 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
911 @@ -16,7 +16,7 @@ static int __init dma_init(void)
912 }
913 fs_initcall(dma_init);
914
915 -struct dma_map_ops *dma_get_ops(struct device *dev)
916 +const struct dma_map_ops *dma_get_ops(struct device *dev)
917 {
918 return dma_ops;
919 }
920 diff -urNp linux-2.6.32.41/arch/ia64/kernel/module.c linux-2.6.32.41/arch/ia64/kernel/module.c
921 --- linux-2.6.32.41/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
922 +++ linux-2.6.32.41/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
923 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
924 void
925 module_free (struct module *mod, void *module_region)
926 {
927 - if (mod && mod->arch.init_unw_table &&
928 - module_region == mod->module_init) {
929 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
930 unw_remove_unwind_table(mod->arch.init_unw_table);
931 mod->arch.init_unw_table = NULL;
932 }
933 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
934 }
935
936 static inline int
937 +in_init_rx (const struct module *mod, uint64_t addr)
938 +{
939 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
940 +}
941 +
942 +static inline int
943 +in_init_rw (const struct module *mod, uint64_t addr)
944 +{
945 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
946 +}
947 +
948 +static inline int
949 in_init (const struct module *mod, uint64_t addr)
950 {
951 - return addr - (uint64_t) mod->module_init < mod->init_size;
952 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
953 +}
954 +
955 +static inline int
956 +in_core_rx (const struct module *mod, uint64_t addr)
957 +{
958 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
959 +}
960 +
961 +static inline int
962 +in_core_rw (const struct module *mod, uint64_t addr)
963 +{
964 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
965 }
966
967 static inline int
968 in_core (const struct module *mod, uint64_t addr)
969 {
970 - return addr - (uint64_t) mod->module_core < mod->core_size;
971 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
972 }
973
974 static inline int
975 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
976 break;
977
978 case RV_BDREL:
979 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
980 + if (in_init_rx(mod, val))
981 + val -= (uint64_t) mod->module_init_rx;
982 + else if (in_init_rw(mod, val))
983 + val -= (uint64_t) mod->module_init_rw;
984 + else if (in_core_rx(mod, val))
985 + val -= (uint64_t) mod->module_core_rx;
986 + else if (in_core_rw(mod, val))
987 + val -= (uint64_t) mod->module_core_rw;
988 break;
989
990 case RV_LTV:
991 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
992 * addresses have been selected...
993 */
994 uint64_t gp;
995 - if (mod->core_size > MAX_LTOFF)
996 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
997 /*
998 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
999 * at the end of the module.
1000 */
1001 - gp = mod->core_size - MAX_LTOFF / 2;
1002 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1003 else
1004 - gp = mod->core_size / 2;
1005 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1006 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1007 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1008 mod->arch.gp = gp;
1009 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1010 }
1011 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-dma.c linux-2.6.32.41/arch/ia64/kernel/pci-dma.c
1012 --- linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1013 +++ linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1014 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1015 .dma_mask = &fallback_dev.coherent_dma_mask,
1016 };
1017
1018 -extern struct dma_map_ops intel_dma_ops;
1019 +extern const struct dma_map_ops intel_dma_ops;
1020
1021 static int __init pci_iommu_init(void)
1022 {
1023 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1024 }
1025 EXPORT_SYMBOL(iommu_dma_supported);
1026
1027 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1028 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1029 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1030 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1031 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1032 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1033 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1034 +
1035 +static const struct dma_map_ops intel_iommu_dma_ops = {
1036 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1037 + .alloc_coherent = intel_alloc_coherent,
1038 + .free_coherent = intel_free_coherent,
1039 + .map_sg = intel_map_sg,
1040 + .unmap_sg = intel_unmap_sg,
1041 + .map_page = intel_map_page,
1042 + .unmap_page = intel_unmap_page,
1043 + .mapping_error = intel_mapping_error,
1044 +
1045 + .sync_single_for_cpu = machvec_dma_sync_single,
1046 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1047 + .sync_single_for_device = machvec_dma_sync_single,
1048 + .sync_sg_for_device = machvec_dma_sync_sg,
1049 + .dma_supported = iommu_dma_supported,
1050 +};
1051 +
1052 void __init pci_iommu_alloc(void)
1053 {
1054 - dma_ops = &intel_dma_ops;
1055 -
1056 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1057 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1058 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1059 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1060 - dma_ops->dma_supported = iommu_dma_supported;
1061 + dma_ops = &intel_iommu_dma_ops;
1062
1063 /*
1064 * The order of these functions is important for
1065 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c
1066 --- linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1067 +++ linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1068 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1069 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1070 }
1071
1072 -struct dma_map_ops swiotlb_dma_ops = {
1073 +const struct dma_map_ops swiotlb_dma_ops = {
1074 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1075 .free_coherent = swiotlb_free_coherent,
1076 .map_page = swiotlb_map_page,
1077 diff -urNp linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c
1078 --- linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1079 +++ linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1080 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1081 if (REGION_NUMBER(addr) == RGN_HPAGE)
1082 addr = 0;
1083 #endif
1084 +
1085 +#ifdef CONFIG_PAX_RANDMMAP
1086 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1087 + addr = mm->free_area_cache;
1088 + else
1089 +#endif
1090 +
1091 if (!addr)
1092 addr = mm->free_area_cache;
1093
1094 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1095 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1096 /* At this point: (!vma || addr < vma->vm_end). */
1097 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1098 - if (start_addr != TASK_UNMAPPED_BASE) {
1099 + if (start_addr != mm->mmap_base) {
1100 /* Start a new search --- just in case we missed some holes. */
1101 - addr = TASK_UNMAPPED_BASE;
1102 + addr = mm->mmap_base;
1103 goto full_search;
1104 }
1105 return -ENOMEM;
1106 }
1107 - if (!vma || addr + len <= vma->vm_start) {
1108 + if (check_heap_stack_gap(vma, addr, len)) {
1109 /* Remember the address where we stopped this search: */
1110 mm->free_area_cache = addr + len;
1111 return addr;
1112 diff -urNp linux-2.6.32.41/arch/ia64/kernel/topology.c linux-2.6.32.41/arch/ia64/kernel/topology.c
1113 --- linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1114 +++ linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1115 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1116 return ret;
1117 }
1118
1119 -static struct sysfs_ops cache_sysfs_ops = {
1120 +static const struct sysfs_ops cache_sysfs_ops = {
1121 .show = cache_show
1122 };
1123
1124 diff -urNp linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S
1125 --- linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1126 +++ linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1127 @@ -190,7 +190,7 @@ SECTIONS
1128 /* Per-cpu data: */
1129 . = ALIGN(PERCPU_PAGE_SIZE);
1130 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1131 - __phys_per_cpu_start = __per_cpu_load;
1132 + __phys_per_cpu_start = per_cpu_load;
1133 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1134 * into percpu page size
1135 */
1136 diff -urNp linux-2.6.32.41/arch/ia64/mm/fault.c linux-2.6.32.41/arch/ia64/mm/fault.c
1137 --- linux-2.6.32.41/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1138 +++ linux-2.6.32.41/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1139 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1140 return pte_present(pte);
1141 }
1142
1143 +#ifdef CONFIG_PAX_PAGEEXEC
1144 +void pax_report_insns(void *pc, void *sp)
1145 +{
1146 + unsigned long i;
1147 +
1148 + printk(KERN_ERR "PAX: bytes at PC: ");
1149 + for (i = 0; i < 8; i++) {
1150 + unsigned int c;
1151 + if (get_user(c, (unsigned int *)pc+i))
1152 + printk(KERN_CONT "???????? ");
1153 + else
1154 + printk(KERN_CONT "%08x ", c);
1155 + }
1156 + printk("\n");
1157 +}
1158 +#endif
1159 +
1160 void __kprobes
1161 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1162 {
1163 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1164 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1165 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1166
1167 - if ((vma->vm_flags & mask) != mask)
1168 + if ((vma->vm_flags & mask) != mask) {
1169 +
1170 +#ifdef CONFIG_PAX_PAGEEXEC
1171 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1172 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1173 + goto bad_area;
1174 +
1175 + up_read(&mm->mmap_sem);
1176 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1177 + do_group_exit(SIGKILL);
1178 + }
1179 +#endif
1180 +
1181 goto bad_area;
1182
1183 + }
1184 +
1185 survive:
1186 /*
1187 * If for any reason at all we couldn't handle the fault, make
1188 diff -urNp linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c
1189 --- linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1190 +++ linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1191 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1192 /* At this point: (!vmm || addr < vmm->vm_end). */
1193 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1194 return -ENOMEM;
1195 - if (!vmm || (addr + len) <= vmm->vm_start)
1196 + if (check_heap_stack_gap(vmm, addr, len))
1197 return addr;
1198 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1199 }
1200 diff -urNp linux-2.6.32.41/arch/ia64/mm/init.c linux-2.6.32.41/arch/ia64/mm/init.c
1201 --- linux-2.6.32.41/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1202 +++ linux-2.6.32.41/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1203 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1204 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1205 vma->vm_end = vma->vm_start + PAGE_SIZE;
1206 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1207 +
1208 +#ifdef CONFIG_PAX_PAGEEXEC
1209 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1210 + vma->vm_flags &= ~VM_EXEC;
1211 +
1212 +#ifdef CONFIG_PAX_MPROTECT
1213 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1214 + vma->vm_flags &= ~VM_MAYEXEC;
1215 +#endif
1216 +
1217 + }
1218 +#endif
1219 +
1220 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1221 down_write(&current->mm->mmap_sem);
1222 if (insert_vm_struct(current->mm, vma)) {
1223 diff -urNp linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c
1224 --- linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1225 +++ linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1226 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1227 return ret;
1228 }
1229
1230 -static struct dma_map_ops sn_dma_ops = {
1231 +static const struct dma_map_ops sn_dma_ops = {
1232 .alloc_coherent = sn_dma_alloc_coherent,
1233 .free_coherent = sn_dma_free_coherent,
1234 .map_page = sn_dma_map_page,
1235 diff -urNp linux-2.6.32.41/arch/m32r/lib/usercopy.c linux-2.6.32.41/arch/m32r/lib/usercopy.c
1236 --- linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1237 +++ linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1238 @@ -14,6 +14,9 @@
1239 unsigned long
1240 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1241 {
1242 + if ((long)n < 0)
1243 + return n;
1244 +
1245 prefetch(from);
1246 if (access_ok(VERIFY_WRITE, to, n))
1247 __copy_user(to,from,n);
1248 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1249 unsigned long
1250 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1251 {
1252 + if ((long)n < 0)
1253 + return n;
1254 +
1255 prefetchw(to);
1256 if (access_ok(VERIFY_READ, from, n))
1257 __copy_user_zeroing(to,from,n);
1258 diff -urNp linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c
1259 --- linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1260 +++ linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1261 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1262
1263 }
1264
1265 -static struct platform_suspend_ops db1x_pm_ops = {
1266 +static const struct platform_suspend_ops db1x_pm_ops = {
1267 .valid = suspend_valid_only_mem,
1268 .begin = db1x_pm_begin,
1269 .enter = db1x_pm_enter,
1270 diff -urNp linux-2.6.32.41/arch/mips/include/asm/elf.h linux-2.6.32.41/arch/mips/include/asm/elf.h
1271 --- linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1272 +++ linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1273 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1274 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1275 #endif
1276
1277 +#ifdef CONFIG_PAX_ASLR
1278 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1279 +
1280 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1281 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1282 +#endif
1283 +
1284 #endif /* _ASM_ELF_H */
1285 diff -urNp linux-2.6.32.41/arch/mips/include/asm/page.h linux-2.6.32.41/arch/mips/include/asm/page.h
1286 --- linux-2.6.32.41/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1287 +++ linux-2.6.32.41/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1288 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1289 #ifdef CONFIG_CPU_MIPS32
1290 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1291 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1292 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1293 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1294 #else
1295 typedef struct { unsigned long long pte; } pte_t;
1296 #define pte_val(x) ((x).pte)
1297 diff -urNp linux-2.6.32.41/arch/mips/include/asm/system.h linux-2.6.32.41/arch/mips/include/asm/system.h
1298 --- linux-2.6.32.41/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1299 +++ linux-2.6.32.41/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1300 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1301 */
1302 #define __ARCH_WANT_UNLOCKED_CTXSW
1303
1304 -extern unsigned long arch_align_stack(unsigned long sp);
1305 +#define arch_align_stack(x) ((x) & ~0xfUL)
1306
1307 #endif /* _ASM_SYSTEM_H */
1308 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c
1309 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1310 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1311 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1312 #undef ELF_ET_DYN_BASE
1313 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1314
1315 +#ifdef CONFIG_PAX_ASLR
1316 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1317 +
1318 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1319 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1320 +#endif
1321 +
1322 #include <asm/processor.h>
1323 #include <linux/module.h>
1324 #include <linux/elfcore.h>
1325 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c
1326 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1327 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1328 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1329 #undef ELF_ET_DYN_BASE
1330 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1331
1332 +#ifdef CONFIG_PAX_ASLR
1333 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1334 +
1335 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1336 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1337 +#endif
1338 +
1339 #include <asm/processor.h>
1340
1341 /*
1342 diff -urNp linux-2.6.32.41/arch/mips/kernel/kgdb.c linux-2.6.32.41/arch/mips/kernel/kgdb.c
1343 --- linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1344 +++ linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1345 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1346 return -1;
1347 }
1348
1349 +/* cannot be const */
1350 struct kgdb_arch arch_kgdb_ops;
1351
1352 /*
1353 diff -urNp linux-2.6.32.41/arch/mips/kernel/process.c linux-2.6.32.41/arch/mips/kernel/process.c
1354 --- linux-2.6.32.41/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1355 +++ linux-2.6.32.41/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1356 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1357 out:
1358 return pc;
1359 }
1360 -
1361 -/*
1362 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1363 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1364 - */
1365 -unsigned long arch_align_stack(unsigned long sp)
1366 -{
1367 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1368 - sp -= get_random_int() & ~PAGE_MASK;
1369 -
1370 - return sp & ALMASK;
1371 -}
1372 diff -urNp linux-2.6.32.41/arch/mips/kernel/syscall.c linux-2.6.32.41/arch/mips/kernel/syscall.c
1373 --- linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1374 +++ linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1375 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1376 do_color_align = 0;
1377 if (filp || (flags & MAP_SHARED))
1378 do_color_align = 1;
1379 +
1380 +#ifdef CONFIG_PAX_RANDMMAP
1381 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1382 +#endif
1383 +
1384 if (addr) {
1385 if (do_color_align)
1386 addr = COLOUR_ALIGN(addr, pgoff);
1387 else
1388 addr = PAGE_ALIGN(addr);
1389 vmm = find_vma(current->mm, addr);
1390 - if (task_size - len >= addr &&
1391 - (!vmm || addr + len <= vmm->vm_start))
1392 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1393 return addr;
1394 }
1395 - addr = TASK_UNMAPPED_BASE;
1396 + addr = current->mm->mmap_base;
1397 if (do_color_align)
1398 addr = COLOUR_ALIGN(addr, pgoff);
1399 else
1400 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1401 /* At this point: (!vmm || addr < vmm->vm_end). */
1402 if (task_size - len < addr)
1403 return -ENOMEM;
1404 - if (!vmm || addr + len <= vmm->vm_start)
1405 + if (check_heap_stack_gap(vmm, addr, len))
1406 return addr;
1407 addr = vmm->vm_end;
1408 if (do_color_align)
1409 diff -urNp linux-2.6.32.41/arch/mips/mm/fault.c linux-2.6.32.41/arch/mips/mm/fault.c
1410 --- linux-2.6.32.41/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1411 +++ linux-2.6.32.41/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1412 @@ -26,6 +26,23 @@
1413 #include <asm/ptrace.h>
1414 #include <asm/highmem.h> /* For VMALLOC_END */
1415
1416 +#ifdef CONFIG_PAX_PAGEEXEC
1417 +void pax_report_insns(void *pc, void *sp)
1418 +{
1419 + unsigned long i;
1420 +
1421 + printk(KERN_ERR "PAX: bytes at PC: ");
1422 + for (i = 0; i < 5; i++) {
1423 + unsigned int c;
1424 + if (get_user(c, (unsigned int *)pc+i))
1425 + printk(KERN_CONT "???????? ");
1426 + else
1427 + printk(KERN_CONT "%08x ", c);
1428 + }
1429 + printk("\n");
1430 +}
1431 +#endif
1432 +
1433 /*
1434 * This routine handles page faults. It determines the address,
1435 * and the problem, and then passes it off to one of the appropriate
1436 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/elf.h linux-2.6.32.41/arch/parisc/include/asm/elf.h
1437 --- linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1438 +++ linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1439 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1440
1441 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1442
1443 +#ifdef CONFIG_PAX_ASLR
1444 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1445 +
1446 +#define PAX_DELTA_MMAP_LEN 16
1447 +#define PAX_DELTA_STACK_LEN 16
1448 +#endif
1449 +
1450 /* This yields a mask that user programs can use to figure out what
1451 instruction set this CPU supports. This could be done in user space,
1452 but it's not easy, and we've already done it here. */
1453 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/pgtable.h linux-2.6.32.41/arch/parisc/include/asm/pgtable.h
1454 --- linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1455 +++ linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1456 @@ -207,6 +207,17 @@
1457 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1458 #define PAGE_COPY PAGE_EXECREAD
1459 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1460 +
1461 +#ifdef CONFIG_PAX_PAGEEXEC
1462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1465 +#else
1466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1467 +# define PAGE_COPY_NOEXEC PAGE_COPY
1468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1469 +#endif
1470 +
1471 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1472 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1473 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1474 diff -urNp linux-2.6.32.41/arch/parisc/kernel/module.c linux-2.6.32.41/arch/parisc/kernel/module.c
1475 --- linux-2.6.32.41/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1476 +++ linux-2.6.32.41/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1477 @@ -95,16 +95,38 @@
1478
1479 /* three functions to determine where in the module core
1480 * or init pieces the location is */
1481 +static inline int in_init_rx(struct module *me, void *loc)
1482 +{
1483 + return (loc >= me->module_init_rx &&
1484 + loc < (me->module_init_rx + me->init_size_rx));
1485 +}
1486 +
1487 +static inline int in_init_rw(struct module *me, void *loc)
1488 +{
1489 + return (loc >= me->module_init_rw &&
1490 + loc < (me->module_init_rw + me->init_size_rw));
1491 +}
1492 +
1493 static inline int in_init(struct module *me, void *loc)
1494 {
1495 - return (loc >= me->module_init &&
1496 - loc <= (me->module_init + me->init_size));
1497 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1498 +}
1499 +
1500 +static inline int in_core_rx(struct module *me, void *loc)
1501 +{
1502 + return (loc >= me->module_core_rx &&
1503 + loc < (me->module_core_rx + me->core_size_rx));
1504 +}
1505 +
1506 +static inline int in_core_rw(struct module *me, void *loc)
1507 +{
1508 + return (loc >= me->module_core_rw &&
1509 + loc < (me->module_core_rw + me->core_size_rw));
1510 }
1511
1512 static inline int in_core(struct module *me, void *loc)
1513 {
1514 - return (loc >= me->module_core &&
1515 - loc <= (me->module_core + me->core_size));
1516 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1517 }
1518
1519 static inline int in_local(struct module *me, void *loc)
1520 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1521 }
1522
1523 /* align things a bit */
1524 - me->core_size = ALIGN(me->core_size, 16);
1525 - me->arch.got_offset = me->core_size;
1526 - me->core_size += gots * sizeof(struct got_entry);
1527 -
1528 - me->core_size = ALIGN(me->core_size, 16);
1529 - me->arch.fdesc_offset = me->core_size;
1530 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1531 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1532 + me->arch.got_offset = me->core_size_rw;
1533 + me->core_size_rw += gots * sizeof(struct got_entry);
1534 +
1535 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1536 + me->arch.fdesc_offset = me->core_size_rw;
1537 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1538
1539 me->arch.got_max = gots;
1540 me->arch.fdesc_max = fdescs;
1541 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1542
1543 BUG_ON(value == 0);
1544
1545 - got = me->module_core + me->arch.got_offset;
1546 + got = me->module_core_rw + me->arch.got_offset;
1547 for (i = 0; got[i].addr; i++)
1548 if (got[i].addr == value)
1549 goto out;
1550 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1551 #ifdef CONFIG_64BIT
1552 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1553 {
1554 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1555 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1556
1557 if (!value) {
1558 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1559 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1560
1561 /* Create new one */
1562 fdesc->addr = value;
1563 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1564 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1565 return (Elf_Addr)fdesc;
1566 }
1567 #endif /* CONFIG_64BIT */
1568 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1569
1570 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1571 end = table + sechdrs[me->arch.unwind_section].sh_size;
1572 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1573 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1574
1575 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1576 me->arch.unwind_section, table, end, gp);
1577 diff -urNp linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c
1578 --- linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1579 +++ linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1580 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1581 /* At this point: (!vma || addr < vma->vm_end). */
1582 if (TASK_SIZE - len < addr)
1583 return -ENOMEM;
1584 - if (!vma || addr + len <= vma->vm_start)
1585 + if (check_heap_stack_gap(vma, addr, len))
1586 return addr;
1587 addr = vma->vm_end;
1588 }
1589 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1590 /* At this point: (!vma || addr < vma->vm_end). */
1591 if (TASK_SIZE - len < addr)
1592 return -ENOMEM;
1593 - if (!vma || addr + len <= vma->vm_start)
1594 + if (check_heap_stack_gap(vma, addr, len))
1595 return addr;
1596 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1597 if (addr < vma->vm_end) /* handle wraparound */
1598 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1599 if (flags & MAP_FIXED)
1600 return addr;
1601 if (!addr)
1602 - addr = TASK_UNMAPPED_BASE;
1603 + addr = current->mm->mmap_base;
1604
1605 if (filp) {
1606 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1607 diff -urNp linux-2.6.32.41/arch/parisc/kernel/traps.c linux-2.6.32.41/arch/parisc/kernel/traps.c
1608 --- linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1609 +++ linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1610 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1611
1612 down_read(&current->mm->mmap_sem);
1613 vma = find_vma(current->mm,regs->iaoq[0]);
1614 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1615 - && (vma->vm_flags & VM_EXEC)) {
1616 -
1617 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1618 fault_address = regs->iaoq[0];
1619 fault_space = regs->iasq[0];
1620
1621 diff -urNp linux-2.6.32.41/arch/parisc/mm/fault.c linux-2.6.32.41/arch/parisc/mm/fault.c
1622 --- linux-2.6.32.41/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1623 +++ linux-2.6.32.41/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1624 @@ -15,6 +15,7 @@
1625 #include <linux/sched.h>
1626 #include <linux/interrupt.h>
1627 #include <linux/module.h>
1628 +#include <linux/unistd.h>
1629
1630 #include <asm/uaccess.h>
1631 #include <asm/traps.h>
1632 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1633 static unsigned long
1634 parisc_acctyp(unsigned long code, unsigned int inst)
1635 {
1636 - if (code == 6 || code == 16)
1637 + if (code == 6 || code == 7 || code == 16)
1638 return VM_EXEC;
1639
1640 switch (inst & 0xf0000000) {
1641 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1642 }
1643 #endif
1644
1645 +#ifdef CONFIG_PAX_PAGEEXEC
1646 +/*
1647 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1648 + *
1649 + * returns 1 when task should be killed
1650 + * 2 when rt_sigreturn trampoline was detected
1651 + * 3 when unpatched PLT trampoline was detected
1652 + */
1653 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1654 +{
1655 +
1656 +#ifdef CONFIG_PAX_EMUPLT
1657 + int err;
1658 +
1659 + do { /* PaX: unpatched PLT emulation */
1660 + unsigned int bl, depwi;
1661 +
1662 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1663 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1664 +
1665 + if (err)
1666 + break;
1667 +
1668 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1669 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1670 +
1671 + err = get_user(ldw, (unsigned int *)addr);
1672 + err |= get_user(bv, (unsigned int *)(addr+4));
1673 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1674 +
1675 + if (err)
1676 + break;
1677 +
1678 + if (ldw == 0x0E801096U &&
1679 + bv == 0xEAC0C000U &&
1680 + ldw2 == 0x0E881095U)
1681 + {
1682 + unsigned int resolver, map;
1683 +
1684 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1685 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1686 + if (err)
1687 + break;
1688 +
1689 + regs->gr[20] = instruction_pointer(regs)+8;
1690 + regs->gr[21] = map;
1691 + regs->gr[22] = resolver;
1692 + regs->iaoq[0] = resolver | 3UL;
1693 + regs->iaoq[1] = regs->iaoq[0] + 4;
1694 + return 3;
1695 + }
1696 + }
1697 + } while (0);
1698 +#endif
1699 +
1700 +#ifdef CONFIG_PAX_EMUTRAMP
1701 +
1702 +#ifndef CONFIG_PAX_EMUSIGRT
1703 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1704 + return 1;
1705 +#endif
1706 +
1707 + do { /* PaX: rt_sigreturn emulation */
1708 + unsigned int ldi1, ldi2, bel, nop;
1709 +
1710 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1711 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1712 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1713 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1714 +
1715 + if (err)
1716 + break;
1717 +
1718 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1719 + ldi2 == 0x3414015AU &&
1720 + bel == 0xE4008200U &&
1721 + nop == 0x08000240U)
1722 + {
1723 + regs->gr[25] = (ldi1 & 2) >> 1;
1724 + regs->gr[20] = __NR_rt_sigreturn;
1725 + regs->gr[31] = regs->iaoq[1] + 16;
1726 + regs->sr[0] = regs->iasq[1];
1727 + regs->iaoq[0] = 0x100UL;
1728 + regs->iaoq[1] = regs->iaoq[0] + 4;
1729 + regs->iasq[0] = regs->sr[2];
1730 + regs->iasq[1] = regs->sr[2];
1731 + return 2;
1732 + }
1733 + } while (0);
1734 +#endif
1735 +
1736 + return 1;
1737 +}
1738 +
1739 +void pax_report_insns(void *pc, void *sp)
1740 +{
1741 + unsigned long i;
1742 +
1743 + printk(KERN_ERR "PAX: bytes at PC: ");
1744 + for (i = 0; i < 5; i++) {
1745 + unsigned int c;
1746 + if (get_user(c, (unsigned int *)pc+i))
1747 + printk(KERN_CONT "???????? ");
1748 + else
1749 + printk(KERN_CONT "%08x ", c);
1750 + }
1751 + printk("\n");
1752 +}
1753 +#endif
1754 +
1755 int fixup_exception(struct pt_regs *regs)
1756 {
1757 const struct exception_table_entry *fix;
1758 @@ -192,8 +303,33 @@ good_area:
1759
1760 acc_type = parisc_acctyp(code,regs->iir);
1761
1762 - if ((vma->vm_flags & acc_type) != acc_type)
1763 + if ((vma->vm_flags & acc_type) != acc_type) {
1764 +
1765 +#ifdef CONFIG_PAX_PAGEEXEC
1766 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1767 + (address & ~3UL) == instruction_pointer(regs))
1768 + {
1769 + up_read(&mm->mmap_sem);
1770 + switch (pax_handle_fetch_fault(regs)) {
1771 +
1772 +#ifdef CONFIG_PAX_EMUPLT
1773 + case 3:
1774 + return;
1775 +#endif
1776 +
1777 +#ifdef CONFIG_PAX_EMUTRAMP
1778 + case 2:
1779 + return;
1780 +#endif
1781 +
1782 + }
1783 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1784 + do_group_exit(SIGKILL);
1785 + }
1786 +#endif
1787 +
1788 goto bad_area;
1789 + }
1790
1791 /*
1792 * If for any reason at all we couldn't handle the fault, make
1793 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/device.h linux-2.6.32.41/arch/powerpc/include/asm/device.h
1794 --- linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1795 +++ linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1796 @@ -14,7 +14,7 @@ struct dev_archdata {
1797 struct device_node *of_node;
1798
1799 /* DMA operations on that device */
1800 - struct dma_map_ops *dma_ops;
1801 + const struct dma_map_ops *dma_ops;
1802
1803 /*
1804 * When an iommu is in use, dma_data is used as a ptr to the base of the
1805 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h
1806 --- linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1807 +++ linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1808 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1809 #ifdef CONFIG_PPC64
1810 extern struct dma_map_ops dma_iommu_ops;
1811 #endif
1812 -extern struct dma_map_ops dma_direct_ops;
1813 +extern const struct dma_map_ops dma_direct_ops;
1814
1815 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1816 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1817 {
1818 /* We don't handle the NULL dev case for ISA for now. We could
1819 * do it via an out of line call but it is not needed for now. The
1820 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1821 return dev->archdata.dma_ops;
1822 }
1823
1824 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1825 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1826 {
1827 dev->archdata.dma_ops = ops;
1828 }
1829 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1830
1831 static inline int dma_supported(struct device *dev, u64 mask)
1832 {
1833 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1834 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1835
1836 if (unlikely(dma_ops == NULL))
1837 return 0;
1838 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1839
1840 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1841 {
1842 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1843 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1844
1845 if (unlikely(dma_ops == NULL))
1846 return -EIO;
1847 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1848 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1849 dma_addr_t *dma_handle, gfp_t flag)
1850 {
1851 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1852 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1853 void *cpu_addr;
1854
1855 BUG_ON(!dma_ops);
1856 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1857 static inline void dma_free_coherent(struct device *dev, size_t size,
1858 void *cpu_addr, dma_addr_t dma_handle)
1859 {
1860 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1861 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1862
1863 BUG_ON(!dma_ops);
1864
1865 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1866
1867 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1868 {
1869 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1870 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1871
1872 if (dma_ops->mapping_error)
1873 return dma_ops->mapping_error(dev, dma_addr);
1874 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/elf.h linux-2.6.32.41/arch/powerpc/include/asm/elf.h
1875 --- linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1876 +++ linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1877 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1878 the loader. We need to make sure that it is out of the way of the program
1879 that it will "exec", and that there is sufficient room for the brk. */
1880
1881 -extern unsigned long randomize_et_dyn(unsigned long base);
1882 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1883 +#define ELF_ET_DYN_BASE (0x20000000)
1884 +
1885 +#ifdef CONFIG_PAX_ASLR
1886 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1887 +
1888 +#ifdef __powerpc64__
1889 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1890 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1891 +#else
1892 +#define PAX_DELTA_MMAP_LEN 15
1893 +#define PAX_DELTA_STACK_LEN 15
1894 +#endif
1895 +#endif
1896
1897 /*
1898 * Our registers are always unsigned longs, whether we're a 32 bit
1899 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1900 (0x7ff >> (PAGE_SHIFT - 12)) : \
1901 (0x3ffff >> (PAGE_SHIFT - 12)))
1902
1903 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1904 -#define arch_randomize_brk arch_randomize_brk
1905 -
1906 #endif /* __KERNEL__ */
1907
1908 /*
1909 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/iommu.h linux-2.6.32.41/arch/powerpc/include/asm/iommu.h
1910 --- linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1911 +++ linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1912 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1913 extern void iommu_init_early_dart(void);
1914 extern void iommu_init_early_pasemi(void);
1915
1916 +/* dma-iommu.c */
1917 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1918 +
1919 #ifdef CONFIG_PCI
1920 extern void pci_iommu_init(void);
1921 extern void pci_direct_iommu_init(void);
1922 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h
1923 --- linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1924 +++ linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1925 @@ -26,6 +26,7 @@ enum km_type {
1926 KM_SOFTIRQ1,
1927 KM_PPC_SYNC_PAGE,
1928 KM_PPC_SYNC_ICACHE,
1929 + KM_CLEARPAGE,
1930 KM_TYPE_NR
1931 };
1932
1933 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page_64.h linux-2.6.32.41/arch/powerpc/include/asm/page_64.h
1934 --- linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
1935 +++ linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
1936 @@ -180,15 +180,18 @@ do { \
1937 * stack by default, so in the absense of a PT_GNU_STACK program header
1938 * we turn execute permission off.
1939 */
1940 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1941 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1942 +#define VM_STACK_DEFAULT_FLAGS32 \
1943 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1944 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1945
1946 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1947 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1948
1949 +#ifndef CONFIG_PAX_PAGEEXEC
1950 #define VM_STACK_DEFAULT_FLAGS \
1951 (test_thread_flag(TIF_32BIT) ? \
1952 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1953 +#endif
1954
1955 #include <asm-generic/getorder.h>
1956
1957 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page.h linux-2.6.32.41/arch/powerpc/include/asm/page.h
1958 --- linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1959 +++ linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1960 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
1961 * and needs to be executable. This means the whole heap ends
1962 * up being executable.
1963 */
1964 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1965 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1966 +#define VM_DATA_DEFAULT_FLAGS32 \
1967 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1968 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1969
1970 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1971 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1972 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
1973 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1974 #endif
1975
1976 +#define ktla_ktva(addr) (addr)
1977 +#define ktva_ktla(addr) (addr)
1978 +
1979 #ifndef __ASSEMBLY__
1980
1981 #undef STRICT_MM_TYPECHECKS
1982 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pci.h linux-2.6.32.41/arch/powerpc/include/asm/pci.h
1983 --- linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
1984 +++ linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
1985 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
1986 }
1987
1988 #ifdef CONFIG_PCI
1989 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
1990 -extern struct dma_map_ops *get_pci_dma_ops(void);
1991 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
1992 +extern const struct dma_map_ops *get_pci_dma_ops(void);
1993 #else /* CONFIG_PCI */
1994 #define set_pci_dma_ops(d)
1995 #define get_pci_dma_ops() NULL
1996 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h
1997 --- linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1998 +++ linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1999 @@ -2,6 +2,7 @@
2000 #define _ASM_POWERPC_PGTABLE_H
2001 #ifdef __KERNEL__
2002
2003 +#include <linux/const.h>
2004 #ifndef __ASSEMBLY__
2005 #include <asm/processor.h> /* For TASK_SIZE */
2006 #include <asm/mmu.h>
2007 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h
2008 --- linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2009 +++ linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2010 @@ -21,6 +21,7 @@
2011 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2012 #define _PAGE_USER 0x004 /* usermode access allowed */
2013 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2014 +#define _PAGE_EXEC _PAGE_GUARDED
2015 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2016 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2017 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2018 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/reg.h linux-2.6.32.41/arch/powerpc/include/asm/reg.h
2019 --- linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2020 +++ linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2021 @@ -191,6 +191,7 @@
2022 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2023 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2024 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2025 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2026 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2027 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2028 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2029 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h
2030 --- linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2031 +++ linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2032 @@ -13,7 +13,7 @@
2033
2034 #include <linux/swiotlb.h>
2035
2036 -extern struct dma_map_ops swiotlb_dma_ops;
2037 +extern const struct dma_map_ops swiotlb_dma_ops;
2038
2039 static inline void dma_mark_clean(void *addr, size_t size) {}
2040
2041 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/system.h linux-2.6.32.41/arch/powerpc/include/asm/system.h
2042 --- linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2043 +++ linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2044 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2045 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2046 #endif
2047
2048 -extern unsigned long arch_align_stack(unsigned long sp);
2049 +#define arch_align_stack(x) ((x) & ~0xfUL)
2050
2051 /* Used in very early kernel initialization. */
2052 extern unsigned long reloc_offset(void);
2053 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h
2054 --- linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2055 +++ linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2056 @@ -13,6 +13,8 @@
2057 #define VERIFY_READ 0
2058 #define VERIFY_WRITE 1
2059
2060 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2061 +
2062 /*
2063 * The fs value determines whether argument validity checking should be
2064 * performed or not. If get_fs() == USER_DS, checking is performed, with
2065 @@ -327,52 +329,6 @@ do { \
2066 extern unsigned long __copy_tofrom_user(void __user *to,
2067 const void __user *from, unsigned long size);
2068
2069 -#ifndef __powerpc64__
2070 -
2071 -static inline unsigned long copy_from_user(void *to,
2072 - const void __user *from, unsigned long n)
2073 -{
2074 - unsigned long over;
2075 -
2076 - if (access_ok(VERIFY_READ, from, n))
2077 - return __copy_tofrom_user((__force void __user *)to, from, n);
2078 - if ((unsigned long)from < TASK_SIZE) {
2079 - over = (unsigned long)from + n - TASK_SIZE;
2080 - return __copy_tofrom_user((__force void __user *)to, from,
2081 - n - over) + over;
2082 - }
2083 - return n;
2084 -}
2085 -
2086 -static inline unsigned long copy_to_user(void __user *to,
2087 - const void *from, unsigned long n)
2088 -{
2089 - unsigned long over;
2090 -
2091 - if (access_ok(VERIFY_WRITE, to, n))
2092 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2093 - if ((unsigned long)to < TASK_SIZE) {
2094 - over = (unsigned long)to + n - TASK_SIZE;
2095 - return __copy_tofrom_user(to, (__force void __user *)from,
2096 - n - over) + over;
2097 - }
2098 - return n;
2099 -}
2100 -
2101 -#else /* __powerpc64__ */
2102 -
2103 -#define __copy_in_user(to, from, size) \
2104 - __copy_tofrom_user((to), (from), (size))
2105 -
2106 -extern unsigned long copy_from_user(void *to, const void __user *from,
2107 - unsigned long n);
2108 -extern unsigned long copy_to_user(void __user *to, const void *from,
2109 - unsigned long n);
2110 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2111 - unsigned long n);
2112 -
2113 -#endif /* __powerpc64__ */
2114 -
2115 static inline unsigned long __copy_from_user_inatomic(void *to,
2116 const void __user *from, unsigned long n)
2117 {
2118 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2119 if (ret == 0)
2120 return 0;
2121 }
2122 +
2123 + if (!__builtin_constant_p(n))
2124 + check_object_size(to, n, false);
2125 +
2126 return __copy_tofrom_user((__force void __user *)to, from, n);
2127 }
2128
2129 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2130 if (ret == 0)
2131 return 0;
2132 }
2133 +
2134 + if (!__builtin_constant_p(n))
2135 + check_object_size(from, n, true);
2136 +
2137 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2138 }
2139
2140 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2141 return __copy_to_user_inatomic(to, from, size);
2142 }
2143
2144 +#ifndef __powerpc64__
2145 +
2146 +static inline unsigned long __must_check copy_from_user(void *to,
2147 + const void __user *from, unsigned long n)
2148 +{
2149 + unsigned long over;
2150 +
2151 + if ((long)n < 0)
2152 + return n;
2153 +
2154 + if (access_ok(VERIFY_READ, from, n)) {
2155 + if (!__builtin_constant_p(n))
2156 + check_object_size(to, n, false);
2157 + return __copy_tofrom_user((__force void __user *)to, from, n);
2158 + }
2159 + if ((unsigned long)from < TASK_SIZE) {
2160 + over = (unsigned long)from + n - TASK_SIZE;
2161 + if (!__builtin_constant_p(n - over))
2162 + check_object_size(to, n - over, false);
2163 + return __copy_tofrom_user((__force void __user *)to, from,
2164 + n - over) + over;
2165 + }
2166 + return n;
2167 +}
2168 +
2169 +static inline unsigned long __must_check copy_to_user(void __user *to,
2170 + const void *from, unsigned long n)
2171 +{
2172 + unsigned long over;
2173 +
2174 + if ((long)n < 0)
2175 + return n;
2176 +
2177 + if (access_ok(VERIFY_WRITE, to, n)) {
2178 + if (!__builtin_constant_p(n))
2179 + check_object_size(from, n, true);
2180 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2181 + }
2182 + if ((unsigned long)to < TASK_SIZE) {
2183 + over = (unsigned long)to + n - TASK_SIZE;
2184 + if (!__builtin_constant_p(n))
2185 + check_object_size(from, n - over, true);
2186 + return __copy_tofrom_user(to, (__force void __user *)from,
2187 + n - over) + over;
2188 + }
2189 + return n;
2190 +}
2191 +
2192 +#else /* __powerpc64__ */
2193 +
2194 +#define __copy_in_user(to, from, size) \
2195 + __copy_tofrom_user((to), (from), (size))
2196 +
2197 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2198 +{
2199 + if ((long)n < 0 || n > INT_MAX)
2200 + return n;
2201 +
2202 + if (!__builtin_constant_p(n))
2203 + check_object_size(to, n, false);
2204 +
2205 + if (likely(access_ok(VERIFY_READ, from, n)))
2206 + n = __copy_from_user(to, from, n);
2207 + else
2208 + memset(to, 0, n);
2209 + return n;
2210 +}
2211 +
2212 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2213 +{
2214 + if ((long)n < 0 || n > INT_MAX)
2215 + return n;
2216 +
2217 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2218 + if (!__builtin_constant_p(n))
2219 + check_object_size(from, n, true);
2220 + n = __copy_to_user(to, from, n);
2221 + }
2222 + return n;
2223 +}
2224 +
2225 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2226 + unsigned long n);
2227 +
2228 +#endif /* __powerpc64__ */
2229 +
2230 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2231
2232 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2233 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c
2234 --- linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2235 +++ linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2236 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2237 &cache_assoc_attr,
2238 };
2239
2240 -static struct sysfs_ops cache_index_ops = {
2241 +static const struct sysfs_ops cache_index_ops = {
2242 .show = cache_index_show,
2243 };
2244
2245 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma.c linux-2.6.32.41/arch/powerpc/kernel/dma.c
2246 --- linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2247 +++ linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2248 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2249 }
2250 #endif
2251
2252 -struct dma_map_ops dma_direct_ops = {
2253 +const struct dma_map_ops dma_direct_ops = {
2254 .alloc_coherent = dma_direct_alloc_coherent,
2255 .free_coherent = dma_direct_free_coherent,
2256 .map_sg = dma_direct_map_sg,
2257 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c
2258 --- linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2259 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2260 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2261 }
2262
2263 /* We support DMA to/from any memory page via the iommu */
2264 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2265 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2266 {
2267 struct iommu_table *tbl = get_iommu_table_base(dev);
2268
2269 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c
2270 --- linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2271 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2272 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2273 * map_page, and unmap_page on highmem, use normal dma_ops
2274 * for everything else.
2275 */
2276 -struct dma_map_ops swiotlb_dma_ops = {
2277 +const struct dma_map_ops swiotlb_dma_ops = {
2278 .alloc_coherent = dma_direct_alloc_coherent,
2279 .free_coherent = dma_direct_free_coherent,
2280 .map_sg = swiotlb_map_sg_attrs,
2281 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S
2282 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2283 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2284 @@ -455,6 +455,7 @@ storage_fault_common:
2285 std r14,_DAR(r1)
2286 std r15,_DSISR(r1)
2287 addi r3,r1,STACK_FRAME_OVERHEAD
2288 + bl .save_nvgprs
2289 mr r4,r14
2290 mr r5,r15
2291 ld r14,PACA_EXGEN+EX_R14(r13)
2292 @@ -464,8 +465,7 @@ storage_fault_common:
2293 cmpdi r3,0
2294 bne- 1f
2295 b .ret_from_except_lite
2296 -1: bl .save_nvgprs
2297 - mr r5,r3
2298 +1: mr r5,r3
2299 addi r3,r1,STACK_FRAME_OVERHEAD
2300 ld r4,_DAR(r1)
2301 bl .bad_page_fault
2302 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S
2303 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2304 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2305 @@ -818,10 +818,10 @@ handle_page_fault:
2306 11: ld r4,_DAR(r1)
2307 ld r5,_DSISR(r1)
2308 addi r3,r1,STACK_FRAME_OVERHEAD
2309 + bl .save_nvgprs
2310 bl .do_page_fault
2311 cmpdi r3,0
2312 beq+ 13f
2313 - bl .save_nvgprs
2314 mr r5,r3
2315 addi r3,r1,STACK_FRAME_OVERHEAD
2316 lwz r4,_DAR(r1)
2317 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c
2318 --- linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2319 +++ linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2320 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2321 return 1;
2322 }
2323
2324 -static struct dma_map_ops ibmebus_dma_ops = {
2325 +static const struct dma_map_ops ibmebus_dma_ops = {
2326 .alloc_coherent = ibmebus_alloc_coherent,
2327 .free_coherent = ibmebus_free_coherent,
2328 .map_sg = ibmebus_map_sg,
2329 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/kgdb.c linux-2.6.32.41/arch/powerpc/kernel/kgdb.c
2330 --- linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2331 +++ linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2332 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2333 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2334 return 0;
2335
2336 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2337 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2338 regs->nip += 4;
2339
2340 return 1;
2341 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2342 /*
2343 * Global data
2344 */
2345 -struct kgdb_arch arch_kgdb_ops = {
2346 +const struct kgdb_arch arch_kgdb_ops = {
2347 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2348 };
2349
2350 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module_32.c linux-2.6.32.41/arch/powerpc/kernel/module_32.c
2351 --- linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2352 +++ linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2353 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2354 me->arch.core_plt_section = i;
2355 }
2356 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2357 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2358 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2359 return -ENOEXEC;
2360 }
2361
2362 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2363
2364 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2365 /* Init, or core PLT? */
2366 - if (location >= mod->module_core
2367 - && location < mod->module_core + mod->core_size)
2368 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2369 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2370 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2371 - else
2372 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2373 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2374 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2375 + else {
2376 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2377 + return ~0UL;
2378 + }
2379
2380 /* Find this entry, or if that fails, the next avail. entry */
2381 while (entry->jump[0]) {
2382 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module.c linux-2.6.32.41/arch/powerpc/kernel/module.c
2383 --- linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2384 +++ linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2385 @@ -31,11 +31,24 @@
2386
2387 LIST_HEAD(module_bug_list);
2388
2389 +#ifdef CONFIG_PAX_KERNEXEC
2390 void *module_alloc(unsigned long size)
2391 {
2392 if (size == 0)
2393 return NULL;
2394
2395 + return vmalloc(size);
2396 +}
2397 +
2398 +void *module_alloc_exec(unsigned long size)
2399 +#else
2400 +void *module_alloc(unsigned long size)
2401 +#endif
2402 +
2403 +{
2404 + if (size == 0)
2405 + return NULL;
2406 +
2407 return vmalloc_exec(size);
2408 }
2409
2410 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2411 vfree(module_region);
2412 }
2413
2414 +#ifdef CONFIG_PAX_KERNEXEC
2415 +void module_free_exec(struct module *mod, void *module_region)
2416 +{
2417 + module_free(mod, module_region);
2418 +}
2419 +#endif
2420 +
2421 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2422 const Elf_Shdr *sechdrs,
2423 const char *name)
2424 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/pci-common.c linux-2.6.32.41/arch/powerpc/kernel/pci-common.c
2425 --- linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2426 +++ linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2427 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2428 unsigned int ppc_pci_flags = 0;
2429
2430
2431 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2432 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2433
2434 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2435 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2436 {
2437 pci_dma_ops = dma_ops;
2438 }
2439
2440 -struct dma_map_ops *get_pci_dma_ops(void)
2441 +const struct dma_map_ops *get_pci_dma_ops(void)
2442 {
2443 return pci_dma_ops;
2444 }
2445 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/process.c linux-2.6.32.41/arch/powerpc/kernel/process.c
2446 --- linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2447 +++ linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2448 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2449 * Lookup NIP late so we have the best change of getting the
2450 * above info out without failing
2451 */
2452 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2453 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2454 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2455 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2456 #endif
2457 show_stack(current, (unsigned long *) regs->gpr[1]);
2458 if (!user_mode(regs))
2459 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2460 newsp = stack[0];
2461 ip = stack[STACK_FRAME_LR_SAVE];
2462 if (!firstframe || ip != lr) {
2463 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2464 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2466 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2467 - printk(" (%pS)",
2468 + printk(" (%pA)",
2469 (void *)current->ret_stack[curr_frame].ret);
2470 curr_frame--;
2471 }
2472 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2473 struct pt_regs *regs = (struct pt_regs *)
2474 (sp + STACK_FRAME_OVERHEAD);
2475 lr = regs->link;
2476 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2477 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2478 regs->trap, (void *)regs->nip, (void *)lr);
2479 firstframe = 1;
2480 }
2481 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2482 }
2483
2484 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2485 -
2486 -unsigned long arch_align_stack(unsigned long sp)
2487 -{
2488 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2489 - sp -= get_random_int() & ~PAGE_MASK;
2490 - return sp & ~0xf;
2491 -}
2492 -
2493 -static inline unsigned long brk_rnd(void)
2494 -{
2495 - unsigned long rnd = 0;
2496 -
2497 - /* 8MB for 32bit, 1GB for 64bit */
2498 - if (is_32bit_task())
2499 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2500 - else
2501 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2502 -
2503 - return rnd << PAGE_SHIFT;
2504 -}
2505 -
2506 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2507 -{
2508 - unsigned long base = mm->brk;
2509 - unsigned long ret;
2510 -
2511 -#ifdef CONFIG_PPC_STD_MMU_64
2512 - /*
2513 - * If we are using 1TB segments and we are allowed to randomise
2514 - * the heap, we can put it above 1TB so it is backed by a 1TB
2515 - * segment. Otherwise the heap will be in the bottom 1TB
2516 - * which always uses 256MB segments and this may result in a
2517 - * performance penalty.
2518 - */
2519 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2520 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2521 -#endif
2522 -
2523 - ret = PAGE_ALIGN(base + brk_rnd());
2524 -
2525 - if (ret < mm->brk)
2526 - return mm->brk;
2527 -
2528 - return ret;
2529 -}
2530 -
2531 -unsigned long randomize_et_dyn(unsigned long base)
2532 -{
2533 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2534 -
2535 - if (ret < base)
2536 - return base;
2537 -
2538 - return ret;
2539 -}
2540 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_32.c linux-2.6.32.41/arch/powerpc/kernel/signal_32.c
2541 --- linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2544 /* Save user registers on the stack */
2545 frame = &rt_sf->uc.uc_mcontext;
2546 addr = frame;
2547 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2548 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2549 if (save_user_regs(regs, frame, 0, 1))
2550 goto badframe;
2551 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2552 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_64.c linux-2.6.32.41/arch/powerpc/kernel/signal_64.c
2553 --- linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2554 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2555 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2556 current->thread.fpscr.val = 0;
2557
2558 /* Set up to return from userspace. */
2559 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2560 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2561 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2562 } else {
2563 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2564 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c
2565 --- linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2566 +++ linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2567 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2568 if (oldlenp) {
2569 if (!error) {
2570 if (get_user(oldlen, oldlenp) ||
2571 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2572 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2573 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2574 error = -EFAULT;
2575 }
2576 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2577 }
2578 return error;
2579 }
2580 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vdso.c linux-2.6.32.41/arch/powerpc/kernel/vdso.c
2581 --- linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2582 +++ linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2583 @@ -36,6 +36,7 @@
2584 #include <asm/firmware.h>
2585 #include <asm/vdso.h>
2586 #include <asm/vdso_datapage.h>
2587 +#include <asm/mman.h>
2588
2589 #include "setup.h"
2590
2591 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2592 vdso_base = VDSO32_MBASE;
2593 #endif
2594
2595 - current->mm->context.vdso_base = 0;
2596 + current->mm->context.vdso_base = ~0UL;
2597
2598 /* vDSO has a problem and was disabled, just don't "enable" it for the
2599 * process
2600 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2601 vdso_base = get_unmapped_area(NULL, vdso_base,
2602 (vdso_pages << PAGE_SHIFT) +
2603 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2604 - 0, 0);
2605 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2606 if (IS_ERR_VALUE(vdso_base)) {
2607 rc = vdso_base;
2608 goto fail_mmapsem;
2609 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vio.c linux-2.6.32.41/arch/powerpc/kernel/vio.c
2610 --- linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2611 +++ linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2612 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2613 vio_cmo_dealloc(viodev, alloc_size);
2614 }
2615
2616 -struct dma_map_ops vio_dma_mapping_ops = {
2617 +static const struct dma_map_ops vio_dma_mapping_ops = {
2618 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2619 .free_coherent = vio_dma_iommu_free_coherent,
2620 .map_sg = vio_dma_iommu_map_sg,
2621 .unmap_sg = vio_dma_iommu_unmap_sg,
2622 + .dma_supported = dma_iommu_dma_supported,
2623 .map_page = vio_dma_iommu_map_page,
2624 .unmap_page = vio_dma_iommu_unmap_page,
2625
2626 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2627
2628 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2629 {
2630 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2631 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2632 }
2633
2634 diff -urNp linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c
2635 --- linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2636 +++ linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2637 @@ -9,22 +9,6 @@
2638 #include <linux/module.h>
2639 #include <asm/uaccess.h>
2640
2641 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2642 -{
2643 - if (likely(access_ok(VERIFY_READ, from, n)))
2644 - n = __copy_from_user(to, from, n);
2645 - else
2646 - memset(to, 0, n);
2647 - return n;
2648 -}
2649 -
2650 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2651 -{
2652 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2653 - n = __copy_to_user(to, from, n);
2654 - return n;
2655 -}
2656 -
2657 unsigned long copy_in_user(void __user *to, const void __user *from,
2658 unsigned long n)
2659 {
2660 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2661 return n;
2662 }
2663
2664 -EXPORT_SYMBOL(copy_from_user);
2665 -EXPORT_SYMBOL(copy_to_user);
2666 EXPORT_SYMBOL(copy_in_user);
2667
2668 diff -urNp linux-2.6.32.41/arch/powerpc/mm/fault.c linux-2.6.32.41/arch/powerpc/mm/fault.c
2669 --- linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2670 +++ linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2671 @@ -30,6 +30,10 @@
2672 #include <linux/kprobes.h>
2673 #include <linux/kdebug.h>
2674 #include <linux/perf_event.h>
2675 +#include <linux/slab.h>
2676 +#include <linux/pagemap.h>
2677 +#include <linux/compiler.h>
2678 +#include <linux/unistd.h>
2679
2680 #include <asm/firmware.h>
2681 #include <asm/page.h>
2682 @@ -40,6 +44,7 @@
2683 #include <asm/uaccess.h>
2684 #include <asm/tlbflush.h>
2685 #include <asm/siginfo.h>
2686 +#include <asm/ptrace.h>
2687
2688
2689 #ifdef CONFIG_KPROBES
2690 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2691 }
2692 #endif
2693
2694 +#ifdef CONFIG_PAX_PAGEEXEC
2695 +/*
2696 + * PaX: decide what to do with offenders (regs->nip = fault address)
2697 + *
2698 + * returns 1 when task should be killed
2699 + */
2700 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2701 +{
2702 + return 1;
2703 +}
2704 +
2705 +void pax_report_insns(void *pc, void *sp)
2706 +{
2707 + unsigned long i;
2708 +
2709 + printk(KERN_ERR "PAX: bytes at PC: ");
2710 + for (i = 0; i < 5; i++) {
2711 + unsigned int c;
2712 + if (get_user(c, (unsigned int __user *)pc+i))
2713 + printk(KERN_CONT "???????? ");
2714 + else
2715 + printk(KERN_CONT "%08x ", c);
2716 + }
2717 + printk("\n");
2718 +}
2719 +#endif
2720 +
2721 /*
2722 * Check whether the instruction at regs->nip is a store using
2723 * an update addressing form which will update r1.
2724 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2725 * indicate errors in DSISR but can validly be set in SRR1.
2726 */
2727 if (trap == 0x400)
2728 - error_code &= 0x48200000;
2729 + error_code &= 0x58200000;
2730 else
2731 is_write = error_code & DSISR_ISSTORE;
2732 #else
2733 @@ -250,7 +282,7 @@ good_area:
2734 * "undefined". Of those that can be set, this is the only
2735 * one which seems bad.
2736 */
2737 - if (error_code & 0x10000000)
2738 + if (error_code & DSISR_GUARDED)
2739 /* Guarded storage error. */
2740 goto bad_area;
2741 #endif /* CONFIG_8xx */
2742 @@ -265,7 +297,7 @@ good_area:
2743 * processors use the same I/D cache coherency mechanism
2744 * as embedded.
2745 */
2746 - if (error_code & DSISR_PROTFAULT)
2747 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2748 goto bad_area;
2749 #endif /* CONFIG_PPC_STD_MMU */
2750
2751 @@ -335,6 +367,23 @@ bad_area:
2752 bad_area_nosemaphore:
2753 /* User mode accesses cause a SIGSEGV */
2754 if (user_mode(regs)) {
2755 +
2756 +#ifdef CONFIG_PAX_PAGEEXEC
2757 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2758 +#ifdef CONFIG_PPC_STD_MMU
2759 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2760 +#else
2761 + if (is_exec && regs->nip == address) {
2762 +#endif
2763 + switch (pax_handle_fetch_fault(regs)) {
2764 + }
2765 +
2766 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2767 + do_group_exit(SIGKILL);
2768 + }
2769 + }
2770 +#endif
2771 +
2772 _exception(SIGSEGV, regs, code, address);
2773 return 0;
2774 }
2775 diff -urNp linux-2.6.32.41/arch/powerpc/mm/mmap_64.c linux-2.6.32.41/arch/powerpc/mm/mmap_64.c
2776 --- linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2777 +++ linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2778 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2779 */
2780 if (mmap_is_legacy()) {
2781 mm->mmap_base = TASK_UNMAPPED_BASE;
2782 +
2783 +#ifdef CONFIG_PAX_RANDMMAP
2784 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2785 + mm->mmap_base += mm->delta_mmap;
2786 +#endif
2787 +
2788 mm->get_unmapped_area = arch_get_unmapped_area;
2789 mm->unmap_area = arch_unmap_area;
2790 } else {
2791 mm->mmap_base = mmap_base();
2792 +
2793 +#ifdef CONFIG_PAX_RANDMMAP
2794 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2795 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2796 +#endif
2797 +
2798 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2799 mm->unmap_area = arch_unmap_area_topdown;
2800 }
2801 diff -urNp linux-2.6.32.41/arch/powerpc/mm/slice.c linux-2.6.32.41/arch/powerpc/mm/slice.c
2802 --- linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2803 +++ linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2804 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2805 if ((mm->task_size - len) < addr)
2806 return 0;
2807 vma = find_vma(mm, addr);
2808 - return (!vma || (addr + len) <= vma->vm_start);
2809 + return check_heap_stack_gap(vma, addr, len);
2810 }
2811
2812 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2813 @@ -256,7 +256,7 @@ full_search:
2814 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2815 continue;
2816 }
2817 - if (!vma || addr + len <= vma->vm_start) {
2818 + if (check_heap_stack_gap(vma, addr, len)) {
2819 /*
2820 * Remember the place where we stopped the search:
2821 */
2822 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2823 }
2824 }
2825
2826 - addr = mm->mmap_base;
2827 - while (addr > len) {
2828 + if (mm->mmap_base < len)
2829 + addr = -ENOMEM;
2830 + else
2831 + addr = mm->mmap_base - len;
2832 +
2833 + while (!IS_ERR_VALUE(addr)) {
2834 /* Go down by chunk size */
2835 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2836 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2837
2838 /* Check for hit with different page size */
2839 mask = slice_range_to_mask(addr, len);
2840 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2841 * return with success:
2842 */
2843 vma = find_vma(mm, addr);
2844 - if (!vma || (addr + len) <= vma->vm_start) {
2845 + if (check_heap_stack_gap(vma, addr, len)) {
2846 /* remember the address as a hint for next time */
2847 if (use_cache)
2848 mm->free_area_cache = addr;
2849 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2850 mm->cached_hole_size = vma->vm_start - addr;
2851
2852 /* try just below the current vma->vm_start */
2853 - addr = vma->vm_start;
2854 + addr = skip_heap_stack_gap(vma, len);
2855 }
2856
2857 /*
2858 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2859 if (fixed && addr > (mm->task_size - len))
2860 return -EINVAL;
2861
2862 +#ifdef CONFIG_PAX_RANDMMAP
2863 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2864 + addr = 0;
2865 +#endif
2866 +
2867 /* If hint, make sure it matches our alignment restrictions */
2868 if (!fixed && addr) {
2869 addr = _ALIGN_UP(addr, 1ul << pshift);
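
[editor's note] Throughout this section the vanilla free-range test `!vma || addr + len <= vma->vm_start` is replaced by check_heap_stack_gap(), which additionally refuses candidates that would butt up against a stack (or heap) mapping without leaving a guard gap; skip_heap_stack_gap() is its counterpart that returns the next lower candidate, or an -errno value once the search space is exhausted, hence the IS_ERR_VALUE() loop conditions. A rough user-side sketch of the predicate, under the assumption that the real helper (added elsewhere in the patch) derives the gap size from the process limits rather than a fixed constant:

    struct vma_sketch { unsigned long vm_start, vm_end, vm_flags; };

    #define VM_GROWSDOWN_SKETCH  0x0100UL
    #define GUARD_BYTES          (64UL * 4096UL)   /* illustrative, not the patch's value */

    /* non-zero if [addr, addr+len) fits below the next mapping with room to spare */
    static int gap_ok(const struct vma_sketch *next, unsigned long addr, unsigned long len)
    {
            if (!next)
                    return 1;                              /* nothing above: always fine */
            if (addr + len > next->vm_start)
                    return 0;                              /* overlaps the next mapping */
            if (next->vm_flags & VM_GROWSDOWN_SKETCH)      /* keep a cushion below stacks */
                    return addr + len + GUARD_BYTES <= next->vm_start;
            return 1;
    }
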
2870 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c
2871 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2872 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2873 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2874 lite5200_pm_target_state = PM_SUSPEND_ON;
2875 }
2876
2877 -static struct platform_suspend_ops lite5200_pm_ops = {
2878 +static const struct platform_suspend_ops lite5200_pm_ops = {
2879 .valid = lite5200_pm_valid,
2880 .begin = lite5200_pm_begin,
2881 .prepare = lite5200_pm_prepare,
2882 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2883 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2884 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2885 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2886 iounmap(mbar);
2887 }
2888
2889 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2890 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2891 .valid = mpc52xx_pm_valid,
2892 .prepare = mpc52xx_pm_prepare,
2893 .enter = mpc52xx_pm_enter,
2894 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c
2895 --- linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2896 +++ linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2897 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2898 return ret;
2899 }
2900
2901 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2902 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2903 .valid = mpc83xx_suspend_valid,
2904 .begin = mpc83xx_suspend_begin,
2905 .enter = mpc83xx_suspend_enter,
2906 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c
2907 --- linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
2908 +++ linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
2909 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
2910
2911 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2912
2913 -struct dma_map_ops dma_iommu_fixed_ops = {
2914 +const struct dma_map_ops dma_iommu_fixed_ops = {
2915 .alloc_coherent = dma_fixed_alloc_coherent,
2916 .free_coherent = dma_fixed_free_coherent,
2917 .map_sg = dma_fixed_map_sg,
2918 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c
2919 --- linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
2920 +++ linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
2921 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
2922 return mask >= DMA_BIT_MASK(32);
2923 }
2924
2925 -static struct dma_map_ops ps3_sb_dma_ops = {
2926 +static const struct dma_map_ops ps3_sb_dma_ops = {
2927 .alloc_coherent = ps3_alloc_coherent,
2928 .free_coherent = ps3_free_coherent,
2929 .map_sg = ps3_sb_map_sg,
2930 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
2931 .unmap_page = ps3_unmap_page,
2932 };
2933
2934 -static struct dma_map_ops ps3_ioc0_dma_ops = {
2935 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
2936 .alloc_coherent = ps3_alloc_coherent,
2937 .free_coherent = ps3_free_coherent,
2938 .map_sg = ps3_ioc0_map_sg,
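
[editor's note] The many one-line `const` additions in this region (platform_suspend_ops, dma_map_ops, and later sysfs_ops and kgdb_arch) are part of the patch's constification effort: a const-qualified ops table with constant initializers is placed in a read-only section, so its function pointers cannot be retargeted by a stray or attacker-controlled write at run time. A tiny stand-alone illustration of the effect:

    #include <stdio.h>

    struct ops_sketch { void (*enter)(void); };

    static void real_enter(void) { puts("enter"); }

    /* const => emitted into .rodata and rejected as an assignment target */
    static const struct ops_sketch pm_ops_sketch = { .enter = real_enter };

    int main(void)
    {
            pm_ops_sketch.enter();
            /* pm_ops_sketch.enter = NULL;   <- now a compile-time error, and the
             * backing page is mapped read-only even if the check were bypassed */
            return 0;
    }
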
2939 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig
2940 --- linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
2941 +++ linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
2942 @@ -2,6 +2,8 @@ config PPC_PSERIES
2943 depends on PPC64 && PPC_BOOK3S
2944 bool "IBM pSeries & new (POWER5-based) iSeries"
2945 select MPIC
2946 + select PCI_MSI
2947 + select XICS
2948 select PPC_I8259
2949 select PPC_RTAS
2950 select RTAS_ERROR_LOGGING
2951 diff -urNp linux-2.6.32.41/arch/s390/include/asm/elf.h linux-2.6.32.41/arch/s390/include/asm/elf.h
2952 --- linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2953 +++ linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2954 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
2955 that it will "exec", and that there is sufficient room for the brk. */
2956 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2957
2958 +#ifdef CONFIG_PAX_ASLR
2959 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2960 +
2961 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2962 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2963 +#endif
2964 +
2965 /* This yields a mask that user programs can use to figure out what
2966 instruction set this CPU supports. */
2967
2968 diff -urNp linux-2.6.32.41/arch/s390/include/asm/setup.h linux-2.6.32.41/arch/s390/include/asm/setup.h
2969 --- linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
2970 +++ linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
2971 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
2972 void detect_memory_layout(struct mem_chunk chunk[]);
2973
2974 #ifdef CONFIG_S390_SWITCH_AMODE
2975 -extern unsigned int switch_amode;
2976 +#define switch_amode (1)
2977 #else
2978 #define switch_amode (0)
2979 #endif
2980
2981 #ifdef CONFIG_S390_EXEC_PROTECT
2982 -extern unsigned int s390_noexec;
2983 +#define s390_noexec (1)
2984 #else
2985 #define s390_noexec (0)
2986 #endif
2987 diff -urNp linux-2.6.32.41/arch/s390/include/asm/uaccess.h linux-2.6.32.41/arch/s390/include/asm/uaccess.h
2988 --- linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2989 +++ linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2990 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
2991 copy_to_user(void __user *to, const void *from, unsigned long n)
2992 {
2993 might_fault();
2994 +
2995 + if ((long)n < 0)
2996 + return n;
2997 +
2998 if (access_ok(VERIFY_WRITE, to, n))
2999 n = __copy_to_user(to, from, n);
3000 return n;
3001 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3002 static inline unsigned long __must_check
3003 __copy_from_user(void *to, const void __user *from, unsigned long n)
3004 {
3005 + if ((long)n < 0)
3006 + return n;
3007 +
3008 if (__builtin_constant_p(n) && (n <= 256))
3009 return uaccess.copy_from_user_small(n, from, to);
3010 else
3011 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3012 copy_from_user(void *to, const void __user *from, unsigned long n)
3013 {
3014 might_fault();
3015 +
3016 + if ((long)n < 0)
3017 + return n;
3018 +
3019 if (access_ok(VERIFY_READ, from, n))
3020 n = __copy_from_user(to, from, n);
3021 else
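
[editor's note] The `(long)n < 0` guards added to the s390 copy helpers (and repeated later for sparc) catch length values whose sign bit is set — typically a negative ssize_t that, interpreted as unsigned, would request an exabyte-scale copy. Returning n keeps the usual contract ("n bytes were not copied") that callers already handle. A small user-space analogue, offered purely as an illustration:

    #include <stdio.h>

    static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
    {
            (void)to; (void)from;
            if ((long)n < 0)        /* e.g. n == (unsigned long)-12: huge when unsigned */
                    return n;       /* report that nothing was copied, touch no memory */
            /* ... the real copy would run here ... */
            return 0;
    }

    int main(void)
    {
            char src[16] = "hello", dst[16];
            printf("%lu\n", guarded_copy(dst, src, sizeof(src)));          /* 0          */
            printf("%lu\n", guarded_copy(dst, src, (unsigned long)-12));   /* huge value */
            return 0;
    }
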
3022 diff -urNp linux-2.6.32.41/arch/s390/Kconfig linux-2.6.32.41/arch/s390/Kconfig
3023 --- linux-2.6.32.41/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3024 +++ linux-2.6.32.41/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3025 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3026
3027 config S390_SWITCH_AMODE
3028 bool "Switch kernel/user addressing modes"
3029 + default y
3030 help
3031 This option allows to switch the addressing modes of kernel and user
3032 - space. The kernel parameter switch_amode=on will enable this feature,
3033 - default is disabled. Enabling this (via kernel parameter) on machines
3034 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3035 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3036 + will reduce system performance.
3037
3038 Note that this option will also be selected by selecting the execute
3039 - protection option below. Enabling the execute protection via the
3040 - noexec kernel parameter will also switch the addressing modes,
3041 - independent of the switch_amode kernel parameter.
3042 + protection option below. Enabling the execute protection will also
3043 + switch the addressing modes, independent of this option.
3044
3045
3046 config S390_EXEC_PROTECT
3047 bool "Data execute protection"
3048 + default y
3049 select S390_SWITCH_AMODE
3050 help
3051 This option allows to enable a buffer overflow protection for user
3052 space programs and it also selects the addressing mode option above.
3053 - The kernel parameter noexec=on will enable this feature and also
3054 - switch the addressing modes, default is disabled. Enabling this (via
3055 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3056 - will reduce system performance.
3057 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3058 + reduce system performance.
3059
3060 comment "Code generation options"
3061
3062 diff -urNp linux-2.6.32.41/arch/s390/kernel/module.c linux-2.6.32.41/arch/s390/kernel/module.c
3063 --- linux-2.6.32.41/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3064 +++ linux-2.6.32.41/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3065 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3066
3067 /* Increase core size by size of got & plt and set start
3068 offsets for got and plt. */
3069 - me->core_size = ALIGN(me->core_size, 4);
3070 - me->arch.got_offset = me->core_size;
3071 - me->core_size += me->arch.got_size;
3072 - me->arch.plt_offset = me->core_size;
3073 - me->core_size += me->arch.plt_size;
3074 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3075 + me->arch.got_offset = me->core_size_rw;
3076 + me->core_size_rw += me->arch.got_size;
3077 + me->arch.plt_offset = me->core_size_rx;
3078 + me->core_size_rx += me->arch.plt_size;
3079 return 0;
3080 }
3081
3082 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3083 if (info->got_initialized == 0) {
3084 Elf_Addr *gotent;
3085
3086 - gotent = me->module_core + me->arch.got_offset +
3087 + gotent = me->module_core_rw + me->arch.got_offset +
3088 info->got_offset;
3089 *gotent = val;
3090 info->got_initialized = 1;
3091 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3092 else if (r_type == R_390_GOTENT ||
3093 r_type == R_390_GOTPLTENT)
3094 *(unsigned int *) loc =
3095 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3096 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3097 else if (r_type == R_390_GOT64 ||
3098 r_type == R_390_GOTPLT64)
3099 *(unsigned long *) loc = val;
3100 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3101 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3102 if (info->plt_initialized == 0) {
3103 unsigned int *ip;
3104 - ip = me->module_core + me->arch.plt_offset +
3105 + ip = me->module_core_rx + me->arch.plt_offset +
3106 info->plt_offset;
3107 #ifndef CONFIG_64BIT
3108 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3109 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3110 val - loc + 0xffffUL < 0x1ffffeUL) ||
3111 (r_type == R_390_PLT32DBL &&
3112 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3113 - val = (Elf_Addr) me->module_core +
3114 + val = (Elf_Addr) me->module_core_rx +
3115 me->arch.plt_offset +
3116 info->plt_offset;
3117 val += rela->r_addend - loc;
3118 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3119 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3120 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3121 val = val + rela->r_addend -
3122 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3123 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3124 if (r_type == R_390_GOTOFF16)
3125 *(unsigned short *) loc = val;
3126 else if (r_type == R_390_GOTOFF32)
3127 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3128 break;
3129 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3130 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3131 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3132 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3133 rela->r_addend - loc;
3134 if (r_type == R_390_GOTPC)
3135 *(unsigned int *) loc = val;
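
[editor's note] These module.c changes depend on the patch splitting the single module core area into an executable, read-only half and a writable, non-executable half: GOT entries (data the linker writes) are laid out against core_size_rw, while PLT stubs (code that must execute) are laid out against core_size_rx. A sketch of the layout the renamed fields describe, assuming the fields themselves come from the generic module-loader part of this patch:

    /* sketch of the two-region module layout (field names follow the hunk) */
    struct module_core_sketch {
            void          *module_core_rx;  /* code: PLT stubs, text      (r-x) */
            unsigned long  core_size_rx;
            void          *module_core_rw;  /* data: GOT, writable state  (rw-) */
            unsigned long  core_size_rw;
    };

    /* reserving room mirrors the hunk: GOT in the RW half, PLT in the RX half */
    static void reserve_got_plt(struct module_core_sketch *m,
                                unsigned long got_size, unsigned long plt_size,
                                unsigned long *got_off, unsigned long *plt_off)
    {
            *got_off = m->core_size_rw;
            m->core_size_rw += got_size;
            *plt_off = m->core_size_rx;
            m->core_size_rx += plt_size;
    }
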
3136 diff -urNp linux-2.6.32.41/arch/s390/kernel/setup.c linux-2.6.32.41/arch/s390/kernel/setup.c
3137 --- linux-2.6.32.41/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3138 +++ linux-2.6.32.41/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3139 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3140 early_param("mem", early_parse_mem);
3141
3142 #ifdef CONFIG_S390_SWITCH_AMODE
3143 -unsigned int switch_amode = 0;
3144 -EXPORT_SYMBOL_GPL(switch_amode);
3145 -
3146 static int set_amode_and_uaccess(unsigned long user_amode,
3147 unsigned long user32_amode)
3148 {
3149 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3150 return 0;
3151 }
3152 }
3153 -
3154 -/*
3155 - * Switch kernel/user addressing modes?
3156 - */
3157 -static int __init early_parse_switch_amode(char *p)
3158 -{
3159 - switch_amode = 1;
3160 - return 0;
3161 -}
3162 -early_param("switch_amode", early_parse_switch_amode);
3163 -
3164 #else /* CONFIG_S390_SWITCH_AMODE */
3165 static inline int set_amode_and_uaccess(unsigned long user_amode,
3166 unsigned long user32_amode)
3167 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3168 }
3169 #endif /* CONFIG_S390_SWITCH_AMODE */
3170
3171 -#ifdef CONFIG_S390_EXEC_PROTECT
3172 -unsigned int s390_noexec = 0;
3173 -EXPORT_SYMBOL_GPL(s390_noexec);
3174 -
3175 -/*
3176 - * Enable execute protection?
3177 - */
3178 -static int __init early_parse_noexec(char *p)
3179 -{
3180 - if (!strncmp(p, "off", 3))
3181 - return 0;
3182 - switch_amode = 1;
3183 - s390_noexec = 1;
3184 - return 0;
3185 -}
3186 -early_param("noexec", early_parse_noexec);
3187 -#endif /* CONFIG_S390_EXEC_PROTECT */
3188 -
3189 static void setup_addressing_mode(void)
3190 {
3191 if (s390_noexec) {
3192 diff -urNp linux-2.6.32.41/arch/s390/mm/mmap.c linux-2.6.32.41/arch/s390/mm/mmap.c
3193 --- linux-2.6.32.41/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3194 +++ linux-2.6.32.41/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3195 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3196 */
3197 if (mmap_is_legacy()) {
3198 mm->mmap_base = TASK_UNMAPPED_BASE;
3199 +
3200 +#ifdef CONFIG_PAX_RANDMMAP
3201 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3202 + mm->mmap_base += mm->delta_mmap;
3203 +#endif
3204 +
3205 mm->get_unmapped_area = arch_get_unmapped_area;
3206 mm->unmap_area = arch_unmap_area;
3207 } else {
3208 mm->mmap_base = mmap_base();
3209 +
3210 +#ifdef CONFIG_PAX_RANDMMAP
3211 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3212 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3213 +#endif
3214 +
3215 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3216 mm->unmap_area = arch_unmap_area_topdown;
3217 }
3218 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3219 */
3220 if (mmap_is_legacy()) {
3221 mm->mmap_base = TASK_UNMAPPED_BASE;
3222 +
3223 +#ifdef CONFIG_PAX_RANDMMAP
3224 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3225 + mm->mmap_base += mm->delta_mmap;
3226 +#endif
3227 +
3228 mm->get_unmapped_area = s390_get_unmapped_area;
3229 mm->unmap_area = arch_unmap_area;
3230 } else {
3231 mm->mmap_base = mmap_base();
3232 +
3233 +#ifdef CONFIG_PAX_RANDMMAP
3234 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3235 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3236 +#endif
3237 +
3238 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3239 mm->unmap_area = arch_unmap_area_topdown;
3240 }
3241 diff -urNp linux-2.6.32.41/arch/score/include/asm/system.h linux-2.6.32.41/arch/score/include/asm/system.h
3242 --- linux-2.6.32.41/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3243 +++ linux-2.6.32.41/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3244 @@ -17,7 +17,7 @@ do { \
3245 #define finish_arch_switch(prev) do {} while (0)
3246
3247 typedef void (*vi_handler_t)(void);
3248 -extern unsigned long arch_align_stack(unsigned long sp);
3249 +#define arch_align_stack(x) (x)
3250
3251 #define mb() barrier()
3252 #define rmb() barrier()
3253 diff -urNp linux-2.6.32.41/arch/score/kernel/process.c linux-2.6.32.41/arch/score/kernel/process.c
3254 --- linux-2.6.32.41/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3255 +++ linux-2.6.32.41/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3256 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3257
3258 return task_pt_regs(task)->cp0_epc;
3259 }
3260 -
3261 -unsigned long arch_align_stack(unsigned long sp)
3262 -{
3263 - return sp;
3264 -}
3265 diff -urNp linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c
3266 --- linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3267 +++ linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3268 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3269 return 0;
3270 }
3271
3272 -static struct platform_suspend_ops hp6x0_pm_ops = {
3273 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3274 .enter = hp6x0_pm_enter,
3275 .valid = suspend_valid_only_mem,
3276 };
3277 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c
3278 --- linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3279 +++ linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3280 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3281 NULL,
3282 };
3283
3284 -static struct sysfs_ops sq_sysfs_ops = {
3285 +static const struct sysfs_ops sq_sysfs_ops = {
3286 .show = sq_sysfs_show,
3287 .store = sq_sysfs_store,
3288 };
3289 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c
3290 --- linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3291 +++ linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3292 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3293 return 0;
3294 }
3295
3296 -static struct platform_suspend_ops sh_pm_ops = {
3297 +static const struct platform_suspend_ops sh_pm_ops = {
3298 .enter = sh_pm_enter,
3299 .valid = suspend_valid_only_mem,
3300 };
3301 diff -urNp linux-2.6.32.41/arch/sh/kernel/kgdb.c linux-2.6.32.41/arch/sh/kernel/kgdb.c
3302 --- linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3303 +++ linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3304 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3305 {
3306 }
3307
3308 -struct kgdb_arch arch_kgdb_ops = {
3309 +const struct kgdb_arch arch_kgdb_ops = {
3310 /* Breakpoint instruction: trapa #0x3c */
3311 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3312 .gdb_bpt_instr = { 0x3c, 0xc3 },
3313 diff -urNp linux-2.6.32.41/arch/sh/mm/mmap.c linux-2.6.32.41/arch/sh/mm/mmap.c
3314 --- linux-2.6.32.41/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3315 +++ linux-2.6.32.41/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3316 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3317 addr = PAGE_ALIGN(addr);
3318
3319 vma = find_vma(mm, addr);
3320 - if (TASK_SIZE - len >= addr &&
3321 - (!vma || addr + len <= vma->vm_start))
3322 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3323 return addr;
3324 }
3325
3326 @@ -106,7 +105,7 @@ full_search:
3327 }
3328 return -ENOMEM;
3329 }
3330 - if (likely(!vma || addr + len <= vma->vm_start)) {
3331 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3332 /*
3333 * Remember the place where we stopped the search:
3334 */
3335 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3336 addr = PAGE_ALIGN(addr);
3337
3338 vma = find_vma(mm, addr);
3339 - if (TASK_SIZE - len >= addr &&
3340 - (!vma || addr + len <= vma->vm_start))
3341 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3342 return addr;
3343 }
3344
3345 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3346 /* make sure it can fit in the remaining address space */
3347 if (likely(addr > len)) {
3348 vma = find_vma(mm, addr-len);
3349 - if (!vma || addr <= vma->vm_start) {
3350 + if (check_heap_stack_gap(vma, addr - len, len)) {
3351 /* remember the address as a hint for next time */
3352 return (mm->free_area_cache = addr-len);
3353 }
3354 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3355 if (unlikely(mm->mmap_base < len))
3356 goto bottomup;
3357
3358 - addr = mm->mmap_base-len;
3359 - if (do_colour_align)
3360 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3361 + addr = mm->mmap_base - len;
3362
3363 do {
3364 + if (do_colour_align)
3365 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3366 /*
3367 * Lookup failure means no vma is above this address,
3368 * else if new region fits below vma->vm_start,
3369 * return with success:
3370 */
3371 vma = find_vma(mm, addr);
3372 - if (likely(!vma || addr+len <= vma->vm_start)) {
3373 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3374 /* remember the address as a hint for next time */
3375 return (mm->free_area_cache = addr);
3376 }
3377 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3378 mm->cached_hole_size = vma->vm_start - addr;
3379
3380 /* try just below the current vma->vm_start */
3381 - addr = vma->vm_start-len;
3382 - if (do_colour_align)
3383 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3384 - } while (likely(len < vma->vm_start));
3385 + addr = skip_heap_stack_gap(vma, len);
3386 + } while (!IS_ERR_VALUE(addr));
3387
3388 bottomup:
3389 /*
3390 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h
3391 --- linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3392 +++ linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3393 @@ -14,18 +14,40 @@
3394 #define ATOMIC64_INIT(i) { (i) }
3395
3396 #define atomic_read(v) ((v)->counter)
3397 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3398 +{
3399 + return v->counter;
3400 +}
3401 #define atomic64_read(v) ((v)->counter)
3402 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3403 +{
3404 + return v->counter;
3405 +}
3406
3407 #define atomic_set(v, i) (((v)->counter) = i)
3408 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3409 +{
3410 + v->counter = i;
3411 +}
3412 #define atomic64_set(v, i) (((v)->counter) = i)
3413 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3414 +{
3415 + v->counter = i;
3416 +}
3417
3418 extern void atomic_add(int, atomic_t *);
3419 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3420 extern void atomic64_add(long, atomic64_t *);
3421 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3422 extern void atomic_sub(int, atomic_t *);
3423 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3424 extern void atomic64_sub(long, atomic64_t *);
3425 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3426
3427 extern int atomic_add_ret(int, atomic_t *);
3428 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3429 extern long atomic64_add_ret(long, atomic64_t *);
3430 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3431 extern int atomic_sub_ret(int, atomic_t *);
3432 extern long atomic64_sub_ret(long, atomic64_t *);
3433
3434 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3435 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3436
3437 #define atomic_inc_return(v) atomic_add_ret(1, v)
3438 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3439 +{
3440 + return atomic_add_ret_unchecked(1, v);
3441 +}
3442 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3443 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3444 +{
3445 + return atomic64_add_ret_unchecked(1, v);
3446 +}
3447
3448 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3449 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3450 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3451 * other cases.
3452 */
3453 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3454 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3455 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3456
3457 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3458 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3459 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3460
3461 #define atomic_inc(v) atomic_add(1, v)
3462 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3463 +{
3464 + atomic_add_unchecked(1, v);
3465 +}
3466 #define atomic64_inc(v) atomic64_add(1, v)
3467 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3468 +{
3469 + atomic64_add_unchecked(1, v);
3470 +}
3471
3472 #define atomic_dec(v) atomic_sub(1, v)
3473 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3474 +{
3475 + atomic_sub_unchecked(1, v);
3476 +}
3477 #define atomic64_dec(v) atomic64_sub(1, v)
3478 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3479 +{
3480 + atomic64_sub_unchecked(1, v);
3481 +}
3482
3483 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3484 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3485
3486 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3487 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3488 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3489 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3490
3491 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3492 {
3493 - int c, old;
3494 + int c, old, new;
3495 c = atomic_read(v);
3496 for (;;) {
3497 - if (unlikely(c == (u)))
3498 + if (unlikely(c == u))
3499 break;
3500 - old = atomic_cmpxchg((v), c, c + (a));
3501 +
3502 + asm volatile("addcc %2, %0, %0\n"
3503 +
3504 +#ifdef CONFIG_PAX_REFCOUNT
3505 + "tvs %%icc, 6\n"
3506 +#endif
3507 +
3508 + : "=r" (new)
3509 + : "0" (c), "ir" (a)
3510 + : "cc");
3511 +
3512 + old = atomic_cmpxchg(v, c, new);
3513 if (likely(old == c))
3514 break;
3515 c = old;
3516 }
3517 - return c != (u);
3518 + return c != u;
3519 }
3520
3521 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3522 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3523
3524 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3525 {
3526 - long c, old;
3527 + long c, old, new;
3528 c = atomic64_read(v);
3529 for (;;) {
3530 - if (unlikely(c == (u)))
3531 + if (unlikely(c == u))
3532 break;
3533 - old = atomic64_cmpxchg((v), c, c + (a));
3534 +
3535 + asm volatile("addcc %2, %0, %0\n"
3536 +
3537 +#ifdef CONFIG_PAX_REFCOUNT
3538 + "tvs %%xcc, 6\n"
3539 +#endif
3540 +
3541 + : "=r" (new)
3542 + : "0" (c), "ir" (a)
3543 + : "cc");
3544 +
3545 + old = atomic64_cmpxchg(v, c, new);
3546 if (likely(old == c))
3547 break;
3548 c = old;
3549 }
3550 - return c != (u);
3551 + return c != u;
3552 }
3553
3554 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
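
[editor's note] The sparc64 atomic changes follow the PAX_REFCOUNT pattern visible in the hunk itself: the plain add becomes addcc so the condition codes are set, and `tvs %icc, 6` (or `%xcc` for 64-bit values) traps into handler 6 on signed overflow, where the traps_64.c hunk further below calls pax_report_refcount_overflow(); the new *_unchecked types and helpers opt out for counters where wrap-around is intentional. A hedged user-space analogue of the checked/unchecked split, using __builtin_add_overflow in place of the trap:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_sketch_t;

    static void atomic_add_checked(int i, atomic_sketch_t *v)
    {
            int sum;
            if (__builtin_add_overflow(v->counter, i, &sum)) {
                    /* kernel side: trap 6 -> pax_report_refcount_overflow(regs) */
                    fprintf(stderr, "refcount overflow caught\n");
                    abort();
            }
            v->counter = sum;
    }

    static void atomic_add_unchecked(int i, atomic_sketch_t *v)
    {
            /* deliberately unchecked: wrap-around is acceptable for this counter */
            v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
    }

    int main(void)
    {
            atomic_sketch_t a = { INT_MAX - 1 };
            atomic_add_checked(1, &a);      /* fine: reaches INT_MAX        */
            atomic_add_unchecked(1, &a);    /* silently wraps to INT_MIN    */
            atomic_add_checked(1, &a);      /* fine again: INT_MIN + 1      */
            a.counter = INT_MAX;
            atomic_add_checked(1, &a);      /* overflow: reports and aborts */
            return 0;
    }
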
3555 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/cache.h linux-2.6.32.41/arch/sparc/include/asm/cache.h
3556 --- linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3557 +++ linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3558 @@ -8,7 +8,7 @@
3559 #define _SPARC_CACHE_H
3560
3561 #define L1_CACHE_SHIFT 5
3562 -#define L1_CACHE_BYTES 32
3563 +#define L1_CACHE_BYTES 32U
3564 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3565
3566 #ifdef CONFIG_SPARC32
3567 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h
3568 --- linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3569 +++ linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3570 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3571 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3572 #define dma_is_consistent(d, h) (1)
3573
3574 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3575 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3576 extern struct bus_type pci_bus_type;
3577
3578 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3579 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3580 {
3581 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3582 if (dev->bus == &pci_bus_type)
3583 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3584 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3585 dma_addr_t *dma_handle, gfp_t flag)
3586 {
3587 - struct dma_map_ops *ops = get_dma_ops(dev);
3588 + const struct dma_map_ops *ops = get_dma_ops(dev);
3589 void *cpu_addr;
3590
3591 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3592 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3593 static inline void dma_free_coherent(struct device *dev, size_t size,
3594 void *cpu_addr, dma_addr_t dma_handle)
3595 {
3596 - struct dma_map_ops *ops = get_dma_ops(dev);
3597 + const struct dma_map_ops *ops = get_dma_ops(dev);
3598
3599 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3600 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3601 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_32.h linux-2.6.32.41/arch/sparc/include/asm/elf_32.h
3602 --- linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3604 @@ -116,6 +116,13 @@ typedef struct {
3605
3606 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3607
3608 +#ifdef CONFIG_PAX_ASLR
3609 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3610 +
3611 +#define PAX_DELTA_MMAP_LEN 16
3612 +#define PAX_DELTA_STACK_LEN 16
3613 +#endif
3614 +
3615 /* This yields a mask that user programs can use to figure out what
3616 instruction set this cpu supports. This can NOT be done in userspace
3617 on Sparc. */
3618 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_64.h linux-2.6.32.41/arch/sparc/include/asm/elf_64.h
3619 --- linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3620 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3621 @@ -163,6 +163,12 @@ typedef struct {
3622 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3623 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3624
3625 +#ifdef CONFIG_PAX_ASLR
3626 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3627 +
3628 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3629 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3630 +#endif
3631
3632 /* This yields a mask that user programs can use to figure out what
3633 instruction set this cpu supports. */
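
[editor's note] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits applied to the respective base, counted in pages, so the 64-bit values above (28 and 29) translate into multi-terabyte randomization spans once the page shift is added. A quick arithmetic check, under the assumption of sparc64's 8 KiB base pages (PAGE_SHIFT == 13):

    #include <stdio.h>

    int main(void)
    {
            const unsigned page_shift = 13;          /* assumption: 8 KiB pages          */
            const unsigned mmap_bits  = 28;          /* PAX_DELTA_MMAP_LEN, 64-bit task  */
            const unsigned stack_bits = 29;          /* PAX_DELTA_STACK_LEN, 64-bit task */

            printf("mmap  randomisation span: %llu bytes\n",
                   1ULL << (mmap_bits + page_shift));        /* 2 TiB */
            printf("stack randomisation span: %llu bytes\n",
                   1ULL << (stack_bits + page_shift));       /* 4 TiB */
            return 0;
    }
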
3634 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h
3635 --- linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3636 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3637 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3638 BTFIXUPDEF_INT(page_none)
3639 BTFIXUPDEF_INT(page_copy)
3640 BTFIXUPDEF_INT(page_readonly)
3641 +
3642 +#ifdef CONFIG_PAX_PAGEEXEC
3643 +BTFIXUPDEF_INT(page_shared_noexec)
3644 +BTFIXUPDEF_INT(page_copy_noexec)
3645 +BTFIXUPDEF_INT(page_readonly_noexec)
3646 +#endif
3647 +
3648 BTFIXUPDEF_INT(page_kernel)
3649
3650 #define PMD_SHIFT SUN4C_PMD_SHIFT
3651 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3652 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3653 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3654
3655 +#ifdef CONFIG_PAX_PAGEEXEC
3656 +extern pgprot_t PAGE_SHARED_NOEXEC;
3657 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3658 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3659 +#else
3660 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3661 +# define PAGE_COPY_NOEXEC PAGE_COPY
3662 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3663 +#endif
3664 +
3665 extern unsigned long page_kernel;
3666
3667 #ifdef MODULE
3668 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h
3669 --- linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3670 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3671 @@ -115,6 +115,13 @@
3672 SRMMU_EXEC | SRMMU_REF)
3673 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3674 SRMMU_EXEC | SRMMU_REF)
3675 +
3676 +#ifdef CONFIG_PAX_PAGEEXEC
3677 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3678 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3679 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3680 +#endif
3681 +
3682 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3683 SRMMU_DIRTY | SRMMU_REF)
3684
3685 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h
3686 --- linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3687 +++ linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3688 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3689
3690 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3691
3692 -static void inline arch_read_lock(raw_rwlock_t *lock)
3693 +static inline void arch_read_lock(raw_rwlock_t *lock)
3694 {
3695 unsigned long tmp1, tmp2;
3696
3697 __asm__ __volatile__ (
3698 "1: ldsw [%2], %0\n"
3699 " brlz,pn %0, 2f\n"
3700 -"4: add %0, 1, %1\n"
3701 +"4: addcc %0, 1, %1\n"
3702 +
3703 +#ifdef CONFIG_PAX_REFCOUNT
3704 +" tvs %%icc, 6\n"
3705 +#endif
3706 +
3707 " cas [%2], %0, %1\n"
3708 " cmp %0, %1\n"
3709 " bne,pn %%icc, 1b\n"
3710 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3711 " .previous"
3712 : "=&r" (tmp1), "=&r" (tmp2)
3713 : "r" (lock)
3714 - : "memory");
3715 + : "memory", "cc");
3716 }
3717
3718 static int inline arch_read_trylock(raw_rwlock_t *lock)
3719 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3720 "1: ldsw [%2], %0\n"
3721 " brlz,a,pn %0, 2f\n"
3722 " mov 0, %0\n"
3723 -" add %0, 1, %1\n"
3724 +" addcc %0, 1, %1\n"
3725 +
3726 +#ifdef CONFIG_PAX_REFCOUNT
3727 +" tvs %%icc, 6\n"
3728 +#endif
3729 +
3730 " cas [%2], %0, %1\n"
3731 " cmp %0, %1\n"
3732 " bne,pn %%icc, 1b\n"
3733 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3734 return tmp1;
3735 }
3736
3737 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3738 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3739 {
3740 unsigned long tmp1, tmp2;
3741
3742 __asm__ __volatile__(
3743 "1: lduw [%2], %0\n"
3744 -" sub %0, 1, %1\n"
3745 +" subcc %0, 1, %1\n"
3746 +
3747 +#ifdef CONFIG_PAX_REFCOUNT
3748 +" tvs %%icc, 6\n"
3749 +#endif
3750 +
3751 " cas [%2], %0, %1\n"
3752 " cmp %0, %1\n"
3753 " bne,pn %%xcc, 1b\n"
3754 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3755 : "memory");
3756 }
3757
3758 -static void inline arch_write_lock(raw_rwlock_t *lock)
3759 +static inline void arch_write_lock(raw_rwlock_t *lock)
3760 {
3761 unsigned long mask, tmp1, tmp2;
3762
3763 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3764 : "memory");
3765 }
3766
3767 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3768 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3769 {
3770 __asm__ __volatile__(
3771 " stw %%g0, [%0]"
3772 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h
3773 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3774 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
3775 @@ -50,6 +50,8 @@ struct thread_info {
3776 unsigned long w_saved;
3777
3778 struct restart_block restart_block;
3779 +
3780 + unsigned long lowest_stack;
3781 };
3782
3783 /*
3784 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h
3785 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
3786 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
3787 @@ -68,6 +68,8 @@ struct thread_info {
3788 struct pt_regs *kern_una_regs;
3789 unsigned int kern_una_insn;
3790
3791 + unsigned long lowest_stack;
3792 +
3793 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3794 };
3795
3796 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h
3797 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3798 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3799 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3800
3801 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3802 {
3803 - if (n && __access_ok((unsigned long) to, n))
3804 + if ((long)n < 0)
3805 + return n;
3806 +
3807 + if (n && __access_ok((unsigned long) to, n)) {
3808 + if (!__builtin_constant_p(n))
3809 + check_object_size(from, n, true);
3810 return __copy_user(to, (__force void __user *) from, n);
3811 - else
3812 + } else
3813 return n;
3814 }
3815
3816 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3817 {
3818 + if ((long)n < 0)
3819 + return n;
3820 +
3821 + if (!__builtin_constant_p(n))
3822 + check_object_size(from, n, true);
3823 +
3824 return __copy_user(to, (__force void __user *) from, n);
3825 }
3826
3827 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3828 {
3829 - if (n && __access_ok((unsigned long) from, n))
3830 + if ((long)n < 0)
3831 + return n;
3832 +
3833 + if (n && __access_ok((unsigned long) from, n)) {
3834 + if (!__builtin_constant_p(n))
3835 + check_object_size(to, n, false);
3836 return __copy_user((__force void __user *) to, from, n);
3837 - else
3838 + } else
3839 return n;
3840 }
3841
3842 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3843 {
3844 + if ((long)n < 0)
3845 + return n;
3846 +
3847 return __copy_user((__force void __user *) to, from, n);
3848 }
3849
3850 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h
3851 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3852 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3853 @@ -9,6 +9,7 @@
3854 #include <linux/compiler.h>
3855 #include <linux/string.h>
3856 #include <linux/thread_info.h>
3857 +#include <linux/kernel.h>
3858 #include <asm/asi.h>
3859 #include <asm/system.h>
3860 #include <asm/spitfire.h>
3861 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3862 static inline unsigned long __must_check
3863 copy_from_user(void *to, const void __user *from, unsigned long size)
3864 {
3865 - unsigned long ret = ___copy_from_user(to, from, size);
3866 + unsigned long ret;
3867
3868 + if ((long)size < 0 || size > INT_MAX)
3869 + return size;
3870 +
3871 + if (!__builtin_constant_p(size))
3872 + check_object_size(to, size, false);
3873 +
3874 + ret = ___copy_from_user(to, from, size);
3875 if (unlikely(ret))
3876 ret = copy_from_user_fixup(to, from, size);
3877 return ret;
3878 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3879 static inline unsigned long __must_check
3880 copy_to_user(void __user *to, const void *from, unsigned long size)
3881 {
3882 - unsigned long ret = ___copy_to_user(to, from, size);
3883 + unsigned long ret;
3884 +
3885 + if ((long)size < 0 || size > INT_MAX)
3886 + return size;
3887 +
3888 + if (!__builtin_constant_p(size))
3889 + check_object_size(from, size, true);
3890
3891 + ret = ___copy_to_user(to, from, size);
3892 if (unlikely(ret))
3893 ret = copy_to_user_fixup(to, from, size);
3894 return ret;
3895 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess.h linux-2.6.32.41/arch/sparc/include/asm/uaccess.h
3896 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3897 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3898 @@ -1,5 +1,13 @@
3899 #ifndef ___ASM_SPARC_UACCESS_H
3900 #define ___ASM_SPARC_UACCESS_H
3901 +
3902 +#ifdef __KERNEL__
3903 +#ifndef __ASSEMBLY__
3904 +#include <linux/types.h>
3905 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3906 +#endif
3907 +#endif
3908 +
3909 #if defined(__sparc__) && defined(__arch64__)
3910 #include <asm/uaccess_64.h>
3911 #else
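
[editor's note] check_object_size(), declared here and called from the sparc copy helpers above whenever the length is not a compile-time constant, is the patch's user-copy hardening hook: before the copy proceeds it verifies that the kernel-side buffer lies entirely within one known object (a heap allocation or the task stack). A toy illustration of the idea, with the object bounds passed in explicitly because the real implementation, added elsewhere in the patch, consults allocator metadata instead:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* returns 0 and copies only if [src, src+n) stays inside [obj, obj+obj_size) */
    static int copy_within_object(void *dst, const void *src, size_t n,
                                  const void *obj, size_t obj_size)
    {
            uintptr_t s = (uintptr_t)src, o = (uintptr_t)obj;

            if (s < o || n > obj_size || s - o > obj_size - n)
                    return -1;      /* the copy would spill past the object */
            memcpy(dst, src, n);
            return 0;
    }

    int main(void)
    {
            char obj[16] = "public data";
            char out[64];

            printf("%d\n", copy_within_object(out, obj, sizeof(obj), obj, sizeof(obj))); /*  0 */
            printf("%d\n", copy_within_object(out, obj, 32, obj, sizeof(obj)));          /* -1 */
            return 0;
    }
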
3912 diff -urNp linux-2.6.32.41/arch/sparc/kernel/iommu.c linux-2.6.32.41/arch/sparc/kernel/iommu.c
3913 --- linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
3914 +++ linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
3915 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3916 spin_unlock_irqrestore(&iommu->lock, flags);
3917 }
3918
3919 -static struct dma_map_ops sun4u_dma_ops = {
3920 +static const struct dma_map_ops sun4u_dma_ops = {
3921 .alloc_coherent = dma_4u_alloc_coherent,
3922 .free_coherent = dma_4u_free_coherent,
3923 .map_page = dma_4u_map_page,
3924 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
3925 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3926 };
3927
3928 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3929 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3930 EXPORT_SYMBOL(dma_ops);
3931
3932 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3933 diff -urNp linux-2.6.32.41/arch/sparc/kernel/ioport.c linux-2.6.32.41/arch/sparc/kernel/ioport.c
3934 --- linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
3935 +++ linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
3936 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
3937 BUG();
3938 }
3939
3940 -struct dma_map_ops sbus_dma_ops = {
3941 +const struct dma_map_ops sbus_dma_ops = {
3942 .alloc_coherent = sbus_alloc_coherent,
3943 .free_coherent = sbus_free_coherent,
3944 .map_page = sbus_map_page,
3945 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
3946 .sync_sg_for_device = sbus_sync_sg_for_device,
3947 };
3948
3949 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
3950 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
3951 EXPORT_SYMBOL(dma_ops);
3952
3953 static int __init sparc_register_ioport(void)
3954 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
3955 }
3956 }
3957
3958 -struct dma_map_ops pci32_dma_ops = {
3959 +const struct dma_map_ops pci32_dma_ops = {
3960 .alloc_coherent = pci32_alloc_coherent,
3961 .free_coherent = pci32_free_coherent,
3962 .map_page = pci32_map_page,
3963 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c
3964 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
3965 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
3966 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
3967 {
3968 }
3969
3970 -struct kgdb_arch arch_kgdb_ops = {
3971 +const struct kgdb_arch arch_kgdb_ops = {
3972 /* Breakpoint instruction: ta 0x7d */
3973 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
3974 };
3975 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c
3976 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
3977 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
3978 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
3979 {
3980 }
3981
3982 -struct kgdb_arch arch_kgdb_ops = {
3983 +const struct kgdb_arch arch_kgdb_ops = {
3984 /* Breakpoint instruction: ta 0x72 */
3985 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
3986 };
3987 diff -urNp linux-2.6.32.41/arch/sparc/kernel/Makefile linux-2.6.32.41/arch/sparc/kernel/Makefile
3988 --- linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
3989 +++ linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
3990 @@ -3,7 +3,7 @@
3991 #
3992
3993 asflags-y := -ansi
3994 -ccflags-y := -Werror
3995 +#ccflags-y := -Werror
3996
3997 extra-y := head_$(BITS).o
3998 extra-y += init_task.o
3999 diff -urNp linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c
4000 --- linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4001 +++ linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4002 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4003 spin_unlock_irqrestore(&iommu->lock, flags);
4004 }
4005
4006 -static struct dma_map_ops sun4v_dma_ops = {
4007 +static const struct dma_map_ops sun4v_dma_ops = {
4008 .alloc_coherent = dma_4v_alloc_coherent,
4009 .free_coherent = dma_4v_free_coherent,
4010 .map_page = dma_4v_map_page,
4011 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_32.c linux-2.6.32.41/arch/sparc/kernel/process_32.c
4012 --- linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4013 +++ linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4014 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4015 rw->ins[4], rw->ins[5],
4016 rw->ins[6],
4017 rw->ins[7]);
4018 - printk("%pS\n", (void *) rw->ins[7]);
4019 + printk("%pA\n", (void *) rw->ins[7]);
4020 rw = (struct reg_window32 *) rw->ins[6];
4021 }
4022 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4023 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4024
4025 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4026 r->psr, r->pc, r->npc, r->y, print_tainted());
4027 - printk("PC: <%pS>\n", (void *) r->pc);
4028 + printk("PC: <%pA>\n", (void *) r->pc);
4029 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4030 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4031 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4032 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4033 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4034 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4035 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4036 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4037
4038 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4039 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4040 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4041 rw = (struct reg_window32 *) fp;
4042 pc = rw->ins[7];
4043 printk("[%08lx : ", pc);
4044 - printk("%pS ] ", (void *) pc);
4045 + printk("%pA ] ", (void *) pc);
4046 fp = rw->ins[6];
4047 } while (++count < 16);
4048 printk("\n");
4049 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_64.c linux-2.6.32.41/arch/sparc/kernel/process_64.c
4050 --- linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4051 +++ linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4052 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4053 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4054 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4055 if (regs->tstate & TSTATE_PRIV)
4056 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4057 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4058 }
4059
4060 void show_regs(struct pt_regs *regs)
4061 {
4062 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4063 regs->tpc, regs->tnpc, regs->y, print_tainted());
4064 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4065 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4066 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4067 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4068 regs->u_regs[3]);
4069 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4070 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4071 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4072 regs->u_regs[15]);
4073 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4074 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4075 show_regwindow(regs);
4076 }
4077
4078 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4079 ((tp && tp->task) ? tp->task->pid : -1));
4080
4081 if (gp->tstate & TSTATE_PRIV) {
4082 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4083 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4084 (void *) gp->tpc,
4085 (void *) gp->o7,
4086 (void *) gp->i7,
4087 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c
4088 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4089 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4090 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4091 if (ARCH_SUN4C && len > 0x20000000)
4092 return -ENOMEM;
4093 if (!addr)
4094 - addr = TASK_UNMAPPED_BASE;
4095 + addr = current->mm->mmap_base;
4096
4097 if (flags & MAP_SHARED)
4098 addr = COLOUR_ALIGN(addr);
4099 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4100 }
4101 if (TASK_SIZE - PAGE_SIZE - len < addr)
4102 return -ENOMEM;
4103 - if (!vmm || addr + len <= vmm->vm_start)
4104 + if (check_heap_stack_gap(vmm, addr, len))
4105 return addr;
4106 addr = vmm->vm_end;
4107 if (flags & MAP_SHARED)
4108 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c
4109 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4110 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4111 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4112 /* We do not accept a shared mapping if it would violate
4113 * cache aliasing constraints.
4114 */
4115 - if ((flags & MAP_SHARED) &&
4116 + if ((filp || (flags & MAP_SHARED)) &&
4117 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4118 return -EINVAL;
4119 return addr;
4120 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4121 if (filp || (flags & MAP_SHARED))
4122 do_color_align = 1;
4123
4124 +#ifdef CONFIG_PAX_RANDMMAP
4125 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4126 +#endif
4127 +
4128 if (addr) {
4129 if (do_color_align)
4130 addr = COLOUR_ALIGN(addr, pgoff);
4131 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4132 addr = PAGE_ALIGN(addr);
4133
4134 vma = find_vma(mm, addr);
4135 - if (task_size - len >= addr &&
4136 - (!vma || addr + len <= vma->vm_start))
4137 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4138 return addr;
4139 }
4140
4141 if (len > mm->cached_hole_size) {
4142 - start_addr = addr = mm->free_area_cache;
4143 + start_addr = addr = mm->free_area_cache;
4144 } else {
4145 - start_addr = addr = TASK_UNMAPPED_BASE;
4146 + start_addr = addr = mm->mmap_base;
4147 mm->cached_hole_size = 0;
4148 }
4149
4150 @@ -175,14 +178,14 @@ full_search:
4151 vma = find_vma(mm, VA_EXCLUDE_END);
4152 }
4153 if (unlikely(task_size < addr)) {
4154 - if (start_addr != TASK_UNMAPPED_BASE) {
4155 - start_addr = addr = TASK_UNMAPPED_BASE;
4156 + if (start_addr != mm->mmap_base) {
4157 + start_addr = addr = mm->mmap_base;
4158 mm->cached_hole_size = 0;
4159 goto full_search;
4160 }
4161 return -ENOMEM;
4162 }
4163 - if (likely(!vma || addr + len <= vma->vm_start)) {
4164 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4165 /*
4166 * Remember the place where we stopped the search:
4167 */
4168 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4169 /* We do not accept a shared mapping if it would violate
4170 * cache aliasing constraints.
4171 */
4172 - if ((flags & MAP_SHARED) &&
4173 + if ((filp || (flags & MAP_SHARED)) &&
4174 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4175 return -EINVAL;
4176 return addr;
4177 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4178 addr = PAGE_ALIGN(addr);
4179
4180 vma = find_vma(mm, addr);
4181 - if (task_size - len >= addr &&
4182 - (!vma || addr + len <= vma->vm_start))
4183 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4184 return addr;
4185 }
4186
4187 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4188 /* make sure it can fit in the remaining address space */
4189 if (likely(addr > len)) {
4190 vma = find_vma(mm, addr-len);
4191 - if (!vma || addr <= vma->vm_start) {
4192 + if (check_heap_stack_gap(vma, addr - len, len)) {
4193 /* remember the address as a hint for next time */
4194 return (mm->free_area_cache = addr-len);
4195 }
4196 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4197 if (unlikely(mm->mmap_base < len))
4198 goto bottomup;
4199
4200 - addr = mm->mmap_base-len;
4201 - if (do_color_align)
4202 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4203 + addr = mm->mmap_base - len;
4204
4205 do {
4206 + if (do_color_align)
4207 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4208 /*
4209 * Lookup failure means no vma is above this address,
4210 * else if new region fits below vma->vm_start,
4211 * return with success:
4212 */
4213 vma = find_vma(mm, addr);
4214 - if (likely(!vma || addr+len <= vma->vm_start)) {
4215 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /* remember the address as a hint for next time */
4217 return (mm->free_area_cache = addr);
4218 }
4219 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4220 mm->cached_hole_size = vma->vm_start - addr;
4221
4222 /* try just below the current vma->vm_start */
4223 - addr = vma->vm_start-len;
4224 - if (do_color_align)
4225 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4226 - } while (likely(len < vma->vm_start));
4227 + addr = skip_heap_stack_gap(vma, len);
4228 + } while (!IS_ERR_VALUE(addr));
4229
4230 bottomup:
4231 /*
4232 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4233 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4234 sysctl_legacy_va_layout) {
4235 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4236 +
4237 +#ifdef CONFIG_PAX_RANDMMAP
4238 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4239 + mm->mmap_base += mm->delta_mmap;
4240 +#endif
4241 +
4242 mm->get_unmapped_area = arch_get_unmapped_area;
4243 mm->unmap_area = arch_unmap_area;
4244 } else {
4245 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4246 gap = (task_size / 6 * 5);
4247
4248 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4249 +
4250 +#ifdef CONFIG_PAX_RANDMMAP
4251 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4252 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4253 +#endif
4254 +
4255 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4256 mm->unmap_area = arch_unmap_area_topdown;
4257 }
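
Note: the hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test in arch_get_unmapped_area()/arch_get_unmapped_area_topdown() with check_heap_stack_gap(), and under CONFIG_PAX_RANDMMAP they bias mm->mmap_base by the per-mm deltas in arch_pick_mmap_layout(). The helper itself is introduced elsewhere in the patch; the sketch below only illustrates the intended semantics, namely that a candidate range is acceptable when it ends a guard gap below the next VMA, so a downward-growing stack cannot silently close up against the heap or mmap area. HYPOTHETICAL_GAP_PAGES and the vma_stub type are inventions for the sketch, not the patch's names.

/* Minimal sketch of the gap check these hunks rely on; the real
 * check_heap_stack_gap() is defined elsewhere in the patch.
 * HYPOTHETICAL_GAP_PAGES and vma_stub are inventions for the sketch. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH       4096UL
#define HYPOTHETICAL_GAP_PAGES 16UL    /* assumed guard size, not the patch's tunable */

struct vma_stub {
        unsigned long vm_start;
        unsigned long vm_end;
        bool grows_down;               /* stands in for VM_GROWSDOWN */
};

static bool sketch_check_gap(const struct vma_stub *next_vma,
                             unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!next_vma)                 /* nothing above the candidate: any fit is fine */
                return true;
        if (next_vma->grows_down)      /* keep a guard below a downward-growing stack */
                gap = HYPOTHETICAL_GAP_PAGES * PAGE_SIZE_SKETCH;
        return addr + len + gap <= next_vma->vm_start;
}

int main(void)
{
        struct vma_stub stack = { 0x7f000000UL, 0x7f100000UL, true };  /* made-up stack VMA */

        printf("%d\n", sketch_check_gap(&stack, 0x7e100000UL, 0x10000UL)); /* well clear: 1 */
        printf("%d\n", sketch_check_gap(&stack, 0x7eff1000UL, 0xf000UL));  /* ends at vm_start: blocked by the guard */
        return 0;
}

In the legacy layout the RANDMMAP delta is added on top of the stock random_factor; in the topdown layout both delta_mmap and delta_stack are subtracted, pulling mmap_base further down before the topdown search starts.
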
4258 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_32.c linux-2.6.32.41/arch/sparc/kernel/traps_32.c
4259 --- linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4260 +++ linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-04-17 15:56:46.000000000 -0400
4261 @@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_
4262 count++ < 30 &&
4263 (((unsigned long) rw) >= PAGE_OFFSET) &&
4264 !(((unsigned long) rw) & 0x7)) {
4265 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4266 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4267 (void *) rw->ins[7]);
4268 rw = (struct reg_window32 *)rw->ins[6];
4269 }
4270 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_64.c linux-2.6.32.41/arch/sparc/kernel/traps_64.c
4271 --- linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4272 +++ linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-04-17 15:56:46.000000000 -0400
4273 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4274 i + 1,
4275 p->trapstack[i].tstate, p->trapstack[i].tpc,
4276 p->trapstack[i].tnpc, p->trapstack[i].tt);
4277 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4278 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4279 }
4280 }
4281
4282 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4283
4284 lvl -= 0x100;
4285 if (regs->tstate & TSTATE_PRIV) {
4286 +
4287 +#ifdef CONFIG_PAX_REFCOUNT
4288 + if (lvl == 6)
4289 + pax_report_refcount_overflow(regs);
4290 +#endif
4291 +
4292 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4293 die_if_kernel(buffer, regs);
4294 }
4295 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4296 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4297 {
4298 char buffer[32];
4299 -
4300 +
4301 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4302 0, lvl, SIGTRAP) == NOTIFY_STOP)
4303 return;
4304
4305 +#ifdef CONFIG_PAX_REFCOUNT
4306 + if (lvl == 6)
4307 + pax_report_refcount_overflow(regs);
4308 +#endif
4309 +
4310 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4311
4312 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4313 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4314 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4315 printk("%s" "ERROR(%d): ",
4316 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4317 - printk("TPC<%pS>\n", (void *) regs->tpc);
4318 + printk("TPC<%pA>\n", (void *) regs->tpc);
4319 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4320 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4321 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4322 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4323 smp_processor_id(),
4324 (type & 0x1) ? 'I' : 'D',
4325 regs->tpc);
4326 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4327 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4328 panic("Irrecoverable Cheetah+ parity error.");
4329 }
4330
4331 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4332 smp_processor_id(),
4333 (type & 0x1) ? 'I' : 'D',
4334 regs->tpc);
4335 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4336 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4337 }
4338
4339 struct sun4v_error_entry {
4340 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4341
4342 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4343 regs->tpc, tl);
4344 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4345 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4346 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4347 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4348 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4349 (void *) regs->u_regs[UREG_I7]);
4350 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4351 "pte[%lx] error[%lx]\n",
4352 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4353
4354 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4355 regs->tpc, tl);
4356 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4357 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4358 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4359 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4360 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4361 (void *) regs->u_regs[UREG_I7]);
4362 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4363 "pte[%lx] error[%lx]\n",
4364 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4365 fp = (unsigned long)sf->fp + STACK_BIAS;
4366 }
4367
4368 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4369 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4370 } while (++count < 16);
4371 }
4372
4373 @@ -2260,7 +2271,7 @@ void die_if_kernel(char *str, struct pt_
4374 while (rw &&
4375 count++ < 30&&
4376 is_kernel_stack(current, rw)) {
4377 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4378 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4379 (void *) rw->ins[7]);
4380
4381 rw = kernel_stack_up(rw);
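
Note: the two CONFIG_PAX_REFCOUNT hooks added to bad_trap() and bad_trap_tl1() treat software trap level 6 as a reference-count overflow report. The instrumented atomics later in this patch raise exactly that trap with "tvs %icc, 6" when a checked add or sub overflows, so the handler can call pax_report_refcount_overflow() before falling through to the normal "Kernel bad sw trap" path. The snippet below only illustrates that dispatch shape; every name in it is invented for the sketch.

/* Illustration only: routing one reserved software trap level to a
 * refcount-overflow report before the usual "bad trap" handling.
 * Every name here is invented for the sketch. */
#include <stdio.h>

#define REFCOUNT_TRAP_LEVEL 6   /* matches the "tvs %icc, 6" used by the checked atomics */

struct fake_regs { unsigned long pc; };

static void report_refcount_overflow(const struct fake_regs *regs)
{
        fprintf(stderr, "refcount overflow detected at %#lx\n", regs->pc);
}

static void sketch_bad_trap(const struct fake_regs *regs, long lvl, int from_kernel)
{
        if (!from_kernel)
                return;                 /* user traps take a different path */
        if (lvl == REFCOUNT_TRAP_LEVEL)
                report_refcount_overflow(regs);
        fprintf(stderr, "Kernel bad sw trap %lx\n", lvl);
        /* die_if_kernel() would follow in the real handler */
}

int main(void)
{
        struct fake_regs regs = { 0xdeadbeefUL };

        sketch_bad_trap(&regs, REFCOUNT_TRAP_LEVEL, 1);
        return 0;
}
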
4382 diff -urNp linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c
4383 --- linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4384 +++ linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4385 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4386 if (count < 5) {
4387 last_time = jiffies;
4388 count++;
4389 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4390 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4391 regs->tpc, (void *) regs->tpc);
4392 }
4393 }
4394 diff -urNp linux-2.6.32.41/arch/sparc/lib/atomic_64.S linux-2.6.32.41/arch/sparc/lib/atomic_64.S
4395 --- linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4396 +++ linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4397 @@ -18,7 +18,12 @@
4398 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4399 BACKOFF_SETUP(%o2)
4400 1: lduw [%o1], %g1
4401 - add %g1, %o0, %g7
4402 + addcc %g1, %o0, %g7
4403 +
4404 +#ifdef CONFIG_PAX_REFCOUNT
4405 + tvs %icc, 6
4406 +#endif
4407 +
4408 cas [%o1], %g1, %g7
4409 cmp %g1, %g7
4410 bne,pn %icc, 2f
4411 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4412 2: BACKOFF_SPIN(%o2, %o3, 1b)
4413 .size atomic_add, .-atomic_add
4414
4415 + .globl atomic_add_unchecked
4416 + .type atomic_add_unchecked,#function
4417 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4418 + BACKOFF_SETUP(%o2)
4419 +1: lduw [%o1], %g1
4420 + add %g1, %o0, %g7
4421 + cas [%o1], %g1, %g7
4422 + cmp %g1, %g7
4423 + bne,pn %icc, 2f
4424 + nop
4425 + retl
4426 + nop
4427 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4428 + .size atomic_add_unchecked, .-atomic_add_unchecked
4429 +
4430 .globl atomic_sub
4431 .type atomic_sub,#function
4432 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4433 BACKOFF_SETUP(%o2)
4434 1: lduw [%o1], %g1
4435 - sub %g1, %o0, %g7
4436 + subcc %g1, %o0, %g7
4437 +
4438 +#ifdef CONFIG_PAX_REFCOUNT
4439 + tvs %icc, 6
4440 +#endif
4441 +
4442 cas [%o1], %g1, %g7
4443 cmp %g1, %g7
4444 bne,pn %icc, 2f
4445 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4446 2: BACKOFF_SPIN(%o2, %o3, 1b)
4447 .size atomic_sub, .-atomic_sub
4448
4449 + .globl atomic_sub_unchecked
4450 + .type atomic_sub_unchecked,#function
4451 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4452 + BACKOFF_SETUP(%o2)
4453 +1: lduw [%o1], %g1
4454 + sub %g1, %o0, %g7
4455 + cas [%o1], %g1, %g7
4456 + cmp %g1, %g7
4457 + bne,pn %icc, 2f
4458 + nop
4459 + retl
4460 + nop
4461 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4462 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4463 +
4464 .globl atomic_add_ret
4465 .type atomic_add_ret,#function
4466 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4467 BACKOFF_SETUP(%o2)
4468 1: lduw [%o1], %g1
4469 - add %g1, %o0, %g7
4470 + addcc %g1, %o0, %g7
4471 +
4472 +#ifdef CONFIG_PAX_REFCOUNT
4473 + tvs %icc, 6
4474 +#endif
4475 +
4476 cas [%o1], %g1, %g7
4477 cmp %g1, %g7
4478 bne,pn %icc, 2f
4479 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4480 2: BACKOFF_SPIN(%o2, %o3, 1b)
4481 .size atomic_add_ret, .-atomic_add_ret
4482
4483 + .globl atomic_add_ret_unchecked
4484 + .type atomic_add_ret_unchecked,#function
4485 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4486 + BACKOFF_SETUP(%o2)
4487 +1: lduw [%o1], %g1
4488 + addcc %g1, %o0, %g7
4489 + cas [%o1], %g1, %g7
4490 + cmp %g1, %g7
4491 + bne,pn %icc, 2f
4492 + add %g7, %o0, %g7
4493 + sra %g7, 0, %o0
4494 + retl
4495 + nop
4496 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4497 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4498 +
4499 .globl atomic_sub_ret
4500 .type atomic_sub_ret,#function
4501 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4502 BACKOFF_SETUP(%o2)
4503 1: lduw [%o1], %g1
4504 - sub %g1, %o0, %g7
4505 + subcc %g1, %o0, %g7
4506 +
4507 +#ifdef CONFIG_PAX_REFCOUNT
4508 + tvs %icc, 6
4509 +#endif
4510 +
4511 cas [%o1], %g1, %g7
4512 cmp %g1, %g7
4513 bne,pn %icc, 2f
4514 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4515 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4516 BACKOFF_SETUP(%o2)
4517 1: ldx [%o1], %g1
4518 - add %g1, %o0, %g7
4519 + addcc %g1, %o0, %g7
4520 +
4521 +#ifdef CONFIG_PAX_REFCOUNT
4522 + tvs %xcc, 6
4523 +#endif
4524 +
4525 casx [%o1], %g1, %g7
4526 cmp %g1, %g7
4527 bne,pn %xcc, 2f
4528 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4529 2: BACKOFF_SPIN(%o2, %o3, 1b)
4530 .size atomic64_add, .-atomic64_add
4531
4532 + .globl atomic64_add_unchecked
4533 + .type atomic64_add_unchecked,#function
4534 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4535 + BACKOFF_SETUP(%o2)
4536 +1: ldx [%o1], %g1
4537 + addcc %g1, %o0, %g7
4538 + casx [%o1], %g1, %g7
4539 + cmp %g1, %g7
4540 + bne,pn %xcc, 2f
4541 + nop
4542 + retl
4543 + nop
4544 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4545 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4546 +
4547 .globl atomic64_sub
4548 .type atomic64_sub,#function
4549 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4550 BACKOFF_SETUP(%o2)
4551 1: ldx [%o1], %g1
4552 - sub %g1, %o0, %g7
4553 + subcc %g1, %o0, %g7
4554 +
4555 +#ifdef CONFIG_PAX_REFCOUNT
4556 + tvs %xcc, 6
4557 +#endif
4558 +
4559 casx [%o1], %g1, %g7
4560 cmp %g1, %g7
4561 bne,pn %xcc, 2f
4562 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4563 2: BACKOFF_SPIN(%o2, %o3, 1b)
4564 .size atomic64_sub, .-atomic64_sub
4565
4566 + .globl atomic64_sub_unchecked
4567 + .type atomic64_sub_unchecked,#function
4568 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4569 + BACKOFF_SETUP(%o2)
4570 +1: ldx [%o1], %g1
4571 + subcc %g1, %o0, %g7
4572 + casx [%o1], %g1, %g7
4573 + cmp %g1, %g7
4574 + bne,pn %xcc, 2f
4575 + nop
4576 + retl
4577 + nop
4578 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4579 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4580 +
4581 .globl atomic64_add_ret
4582 .type atomic64_add_ret,#function
4583 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4584 BACKOFF_SETUP(%o2)
4585 1: ldx [%o1], %g1
4586 - add %g1, %o0, %g7
4587 + addcc %g1, %o0, %g7
4588 +
4589 +#ifdef CONFIG_PAX_REFCOUNT
4590 + tvs %xcc, 6
4591 +#endif
4592 +
4593 casx [%o1], %g1, %g7
4594 cmp %g1, %g7
4595 bne,pn %xcc, 2f
4596 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4597 2: BACKOFF_SPIN(%o2, %o3, 1b)
4598 .size atomic64_add_ret, .-atomic64_add_ret
4599
4600 + .globl atomic64_add_ret_unchecked
4601 + .type atomic64_add_ret_unchecked,#function
4602 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4603 + BACKOFF_SETUP(%o2)
4604 +1: ldx [%o1], %g1
4605 + addcc %g1, %o0, %g7
4606 + casx [%o1], %g1, %g7
4607 + cmp %g1, %g7
4608 + bne,pn %xcc, 2f
4609 + add %g7, %o0, %g7
4610 + mov %g7, %o0
4611 + retl
4612 + nop
4613 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4614 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4615 +
4616 .globl atomic64_sub_ret
4617 .type atomic64_sub_ret,#function
4618 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4619 BACKOFF_SETUP(%o2)
4620 1: ldx [%o1], %g1
4621 - sub %g1, %o0, %g7
4622 + subcc %g1, %o0, %g7
4623 +
4624 +#ifdef CONFIG_PAX_REFCOUNT
4625 + tvs %xcc, 6
4626 +#endif
4627 +
4628 casx [%o1], %g1, %g7
4629 cmp %g1, %g7
4630 bne,pn %xcc, 2f
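
Note: under CONFIG_PAX_REFCOUNT each checked atomic swaps add/sub for the condition-code-setting addcc/subcc and follows it with "tvs %icc, 6" ("%xcc" for the 64-bit ops), so a signed overflow traps into the level-6 hook added in traps_64.c instead of silently wrapping a reference count. The new *_unchecked entry points keep the plain wrapping behaviour for counters that may legitimately overflow. Below is a rough C analogue of the checked/unchecked split, using the compiler overflow builtin purely as an illustration; the cas retry loop and real atomicity are not modelled.

/* Conceptual analogue of the checked/unchecked atomic split; the real
 * implementation is the SPARC assembly above (addcc/subcc + tvs), and
 * the cas retry loop / SMP atomicity is deliberately not modelled. */
#include <stdio.h>
#include <stdlib.h>

/* Checked flavour: a signed overflow of the counter is treated as a bug. */
static int refcount_add_checked(int *counter, int inc)
{
        int result;

        if (__builtin_add_overflow(*counter, inc, &result)) {
                /* stands in for the tvs trap + pax_report_refcount_overflow() */
                fprintf(stderr, "refcount overflow, refusing to wrap\n");
                abort();
        }
        *counter = result;
        return result;
}

/* Unchecked flavour: plain wrapping add, as in atomic_add_unchecked
 * (done via unsigned arithmetic here only to keep the sketch UB-free). */
static int refcount_add_unchecked(int *counter, int inc)
{
        *counter = (int)((unsigned int)*counter + (unsigned int)inc);
        return *counter;
}

int main(void)
{
        int stats = 0x7fffffff;         /* a counter that is allowed to wrap */
        int refs  = 0x7fffffff;         /* a counter that must not */

        printf("unchecked: %d\n", refcount_add_unchecked(&stats, 1));
        printf("checked:   %d\n", refcount_add_checked(&refs, 1)); /* aborts */
        return 0;
}
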
4631 diff -urNp linux-2.6.32.41/arch/sparc/lib/ksyms.c linux-2.6.32.41/arch/sparc/lib/ksyms.c
4632 --- linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4633 +++ linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4634 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4635
4636 /* Atomic counter implementation. */
4637 EXPORT_SYMBOL(atomic_add);
4638 +EXPORT_SYMBOL(atomic_add_unchecked);
4639 EXPORT_SYMBOL(atomic_add_ret);
4640 EXPORT_SYMBOL(atomic_sub);
4641 +EXPORT_SYMBOL(atomic_sub_unchecked);
4642 EXPORT_SYMBOL(atomic_sub_ret);
4643 EXPORT_SYMBOL(atomic64_add);
4644 +EXPORT_SYMBOL(atomic64_add_unchecked);
4645 EXPORT_SYMBOL(atomic64_add_ret);
4646 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4647 EXPORT_SYMBOL(atomic64_sub);
4648 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4649 EXPORT_SYMBOL(atomic64_sub_ret);
4650
4651 /* Atomic bit operations. */
4652 diff -urNp linux-2.6.32.41/arch/sparc/lib/Makefile linux-2.6.32.41/arch/sparc/lib/Makefile
4653 --- linux-2.6.32.41/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4654 +++ linux-2.6.32.41/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4655 @@ -2,7 +2,7 @@
4656 #
4657
4658 asflags-y := -ansi -DST_DIV0=0x02
4659 -ccflags-y := -Werror
4660 +#ccflags-y := -Werror
4661
4662 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4663 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4664 diff -urNp linux-2.6.32.41/arch/sparc/lib/rwsem_64.S linux-2.6.32.41/arch/sparc/lib/rwsem_64.S
4665 --- linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4666 +++ linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4667 @@ -11,7 +11,12 @@
4668 .globl __down_read
4669 __down_read:
4670 1: lduw [%o0], %g1
4671 - add %g1, 1, %g7
4672 + addcc %g1, 1, %g7
4673 +
4674 +#ifdef CONFIG_PAX_REFCOUNT
4675 + tvs %icc, 6
4676 +#endif
4677 +
4678 cas [%o0], %g1, %g7
4679 cmp %g1, %g7
4680 bne,pn %icc, 1b
4681 @@ -33,7 +38,12 @@ __down_read:
4682 .globl __down_read_trylock
4683 __down_read_trylock:
4684 1: lduw [%o0], %g1
4685 - add %g1, 1, %g7
4686 + addcc %g1, 1, %g7
4687 +
4688 +#ifdef CONFIG_PAX_REFCOUNT
4689 + tvs %icc, 6
4690 +#endif
4691 +
4692 cmp %g7, 0
4693 bl,pn %icc, 2f
4694 mov 0, %o1
4695 @@ -51,7 +61,12 @@ __down_write:
4696 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4697 1:
4698 lduw [%o0], %g3
4699 - add %g3, %g1, %g7
4700 + addcc %g3, %g1, %g7
4701 +
4702 +#ifdef CONFIG_PAX_REFCOUNT
4703 + tvs %icc, 6
4704 +#endif
4705 +
4706 cas [%o0], %g3, %g7
4707 cmp %g3, %g7
4708 bne,pn %icc, 1b
4709 @@ -77,7 +92,12 @@ __down_write_trylock:
4710 cmp %g3, 0
4711 bne,pn %icc, 2f
4712 mov 0, %o1
4713 - add %g3, %g1, %g7
4714 + addcc %g3, %g1, %g7
4715 +
4716 +#ifdef CONFIG_PAX_REFCOUNT
4717 + tvs %icc, 6
4718 +#endif
4719 +
4720 cas [%o0], %g3, %g7
4721 cmp %g3, %g7
4722 bne,pn %icc, 1b
4723 @@ -90,7 +110,12 @@ __down_write_trylock:
4724 __up_read:
4725 1:
4726 lduw [%o0], %g1
4727 - sub %g1, 1, %g7
4728 + subcc %g1, 1, %g7
4729 +
4730 +#ifdef CONFIG_PAX_REFCOUNT
4731 + tvs %icc, 6
4732 +#endif
4733 +
4734 cas [%o0], %g1, %g7
4735 cmp %g1, %g7
4736 bne,pn %icc, 1b
4737 @@ -118,7 +143,12 @@ __up_write:
4738 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4739 1:
4740 lduw [%o0], %g3
4741 - sub %g3, %g1, %g7
4742 + subcc %g3, %g1, %g7
4743 +
4744 +#ifdef CONFIG_PAX_REFCOUNT
4745 + tvs %icc, 6
4746 +#endif
4747 +
4748 cas [%o0], %g3, %g7
4749 cmp %g3, %g7
4750 bne,pn %icc, 1b
4751 @@ -143,7 +173,12 @@ __downgrade_write:
4752 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4753 1:
4754 lduw [%o0], %g3
4755 - sub %g3, %g1, %g7
4756 + subcc %g3, %g1, %g7
4757 +
4758 +#ifdef CONFIG_PAX_REFCOUNT
4759 + tvs %icc, 6
4760 +#endif
4761 +
4762 cas [%o0], %g3, %g7
4763 cmp %g3, %g7
4764 bne,pn %icc, 1b
4765 diff -urNp linux-2.6.32.41/arch/sparc/Makefile linux-2.6.32.41/arch/sparc/Makefile
4766 --- linux-2.6.32.41/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4767 +++ linux-2.6.32.41/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4768 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4769 # Export what is needed by arch/sparc/boot/Makefile
4770 export VMLINUX_INIT VMLINUX_MAIN
4771 VMLINUX_INIT := $(head-y) $(init-y)
4772 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4773 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4774 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4775 VMLINUX_MAIN += $(drivers-y) $(net-y)
4776
4777 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_32.c linux-2.6.32.41/arch/sparc/mm/fault_32.c
4778 --- linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4779 +++ linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4780 @@ -21,6 +21,9 @@
4781 #include <linux/interrupt.h>
4782 #include <linux/module.h>
4783 #include <linux/kdebug.h>
4784 +#include <linux/slab.h>
4785 +#include <linux/pagemap.h>
4786 +#include <linux/compiler.h>
4787
4788 #include <asm/system.h>
4789 #include <asm/page.h>
4790 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4791 return safe_compute_effective_address(regs, insn);
4792 }
4793
4794 +#ifdef CONFIG_PAX_PAGEEXEC
4795 +#ifdef CONFIG_PAX_DLRESOLVE
4796 +static void pax_emuplt_close(struct vm_area_struct *vma)
4797 +{
4798 + vma->vm_mm->call_dl_resolve = 0UL;
4799 +}
4800 +
4801 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4802 +{
4803 + unsigned int *kaddr;
4804 +
4805 + vmf->page = alloc_page(GFP_HIGHUSER);
4806 + if (!vmf->page)
4807 + return VM_FAULT_OOM;
4808 +
4809 + kaddr = kmap(vmf->page);
4810 + memset(kaddr, 0, PAGE_SIZE);
4811 + kaddr[0] = 0x9DE3BFA8U; /* save */
4812 + flush_dcache_page(vmf->page);
4813 + kunmap(vmf->page);
4814 + return VM_FAULT_MAJOR;
4815 +}
4816 +
4817 +static const struct vm_operations_struct pax_vm_ops = {
4818 + .close = pax_emuplt_close,
4819 + .fault = pax_emuplt_fault
4820 +};
4821 +
4822 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4823 +{
4824 + int ret;
4825 +
4826 + vma->vm_mm = current->mm;
4827 + vma->vm_start = addr;
4828 + vma->vm_end = addr + PAGE_SIZE;
4829 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4830 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4831 + vma->vm_ops = &pax_vm_ops;
4832 +
4833 + ret = insert_vm_struct(current->mm, vma);
4834 + if (ret)
4835 + return ret;
4836 +
4837 + ++current->mm->total_vm;
4838 + return 0;
4839 +}
4840 +#endif
4841 +
4842 +/*
4843 + * PaX: decide what to do with offenders (regs->pc = fault address)
4844 + *
4845 + * returns 1 when task should be killed
4846 + * 2 when patched PLT trampoline was detected
4847 + * 3 when unpatched PLT trampoline was detected
4848 + */
4849 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4850 +{
4851 +
4852 +#ifdef CONFIG_PAX_EMUPLT
4853 + int err;
4854 +
4855 + do { /* PaX: patched PLT emulation #1 */
4856 + unsigned int sethi1, sethi2, jmpl;
4857 +
4858 + err = get_user(sethi1, (unsigned int *)regs->pc);
4859 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4860 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4861 +
4862 + if (err)
4863 + break;
4864 +
4865 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4866 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4867 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4868 + {
4869 + unsigned int addr;
4870 +
4871 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4872 + addr = regs->u_regs[UREG_G1];
4873 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4874 + regs->pc = addr;
4875 + regs->npc = addr+4;
4876 + return 2;
4877 + }
4878 + } while (0);
4879 +
4880 + { /* PaX: patched PLT emulation #2 */
4881 + unsigned int ba;
4882 +
4883 + err = get_user(ba, (unsigned int *)regs->pc);
4884 +
4885 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4886 + unsigned int addr;
4887 +
4888 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4889 + regs->pc = addr;
4890 + regs->npc = addr+4;
4891 + return 2;
4892 + }
4893 + }
4894 +
4895 + do { /* PaX: patched PLT emulation #3 */
4896 + unsigned int sethi, jmpl, nop;
4897 +
4898 + err = get_user(sethi, (unsigned int *)regs->pc);
4899 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4900 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4901 +
4902 + if (err)
4903 + break;
4904 +
4905 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4906 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4907 + nop == 0x01000000U)
4908 + {
4909 + unsigned int addr;
4910 +
4911 + addr = (sethi & 0x003FFFFFU) << 10;
4912 + regs->u_regs[UREG_G1] = addr;
4913 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4914 + regs->pc = addr;
4915 + regs->npc = addr+4;
4916 + return 2;
4917 + }
4918 + } while (0);
4919 +
4920 + do { /* PaX: unpatched PLT emulation step 1 */
4921 + unsigned int sethi, ba, nop;
4922 +
4923 + err = get_user(sethi, (unsigned int *)regs->pc);
4924 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4925 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4926 +
4927 + if (err)
4928 + break;
4929 +
4930 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4931 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4932 + nop == 0x01000000U)
4933 + {
4934 + unsigned int addr, save, call;
4935 +
4936 + if ((ba & 0xFFC00000U) == 0x30800000U)
4937 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4938 + else
4939 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4940 +
4941 + err = get_user(save, (unsigned int *)addr);
4942 + err |= get_user(call, (unsigned int *)(addr+4));
4943 + err |= get_user(nop, (unsigned int *)(addr+8));
4944 + if (err)
4945 + break;
4946 +
4947 +#ifdef CONFIG_PAX_DLRESOLVE
4948 + if (save == 0x9DE3BFA8U &&
4949 + (call & 0xC0000000U) == 0x40000000U &&
4950 + nop == 0x01000000U)
4951 + {
4952 + struct vm_area_struct *vma;
4953 + unsigned long call_dl_resolve;
4954 +
4955 + down_read(&current->mm->mmap_sem);
4956 + call_dl_resolve = current->mm->call_dl_resolve;
4957 + up_read(&current->mm->mmap_sem);
4958 + if (likely(call_dl_resolve))
4959 + goto emulate;
4960 +
4961 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4962 +
4963 + down_write(&current->mm->mmap_sem);
4964 + if (current->mm->call_dl_resolve) {
4965 + call_dl_resolve = current->mm->call_dl_resolve;
4966 + up_write(&current->mm->mmap_sem);
4967 + if (vma)
4968 + kmem_cache_free(vm_area_cachep, vma);
4969 + goto emulate;
4970 + }
4971 +
4972 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4973 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4974 + up_write(&current->mm->mmap_sem);
4975 + if (vma)
4976 + kmem_cache_free(vm_area_cachep, vma);
4977 + return 1;
4978 + }
4979 +
4980 + if (pax_insert_vma(vma, call_dl_resolve)) {
4981 + up_write(&current->mm->mmap_sem);
4982 + kmem_cache_free(vm_area_cachep, vma);
4983 + return 1;
4984 + }
4985 +
4986 + current->mm->call_dl_resolve = call_dl_resolve;
4987 + up_write(&current->mm->mmap_sem);
4988 +
4989 +emulate:
4990 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4991 + regs->pc = call_dl_resolve;
4992 + regs->npc = addr+4;
4993 + return 3;
4994 + }
4995 +#endif
4996 +
4997 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4998 + if ((save & 0xFFC00000U) == 0x05000000U &&
4999 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5000 + nop == 0x01000000U)
5001 + {
5002 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5003 + regs->u_regs[UREG_G2] = addr + 4;
5004 + addr = (save & 0x003FFFFFU) << 10;
5005 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5006 + regs->pc = addr;
5007 + regs->npc = addr+4;
5008 + return 3;
5009 + }
5010 + }
5011 + } while (0);
5012 +
5013 + do { /* PaX: unpatched PLT emulation step 2 */
5014 + unsigned int save, call, nop;
5015 +
5016 + err = get_user(save, (unsigned int *)(regs->pc-4));
5017 + err |= get_user(call, (unsigned int *)regs->pc);
5018 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5019 + if (err)
5020 + break;
5021 +
5022 + if (save == 0x9DE3BFA8U &&
5023 + (call & 0xC0000000U) == 0x40000000U &&
5024 + nop == 0x01000000U)
5025 + {
5026 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5027 +
5028 + regs->u_regs[UREG_RETPC] = regs->pc;
5029 + regs->pc = dl_resolve;
5030 + regs->npc = dl_resolve+4;
5031 + return 3;
5032 + }
5033 + } while (0);
5034 +#endif
5035 +
5036 + return 1;
5037 +}
5038 +
5039 +void pax_report_insns(void *pc, void *sp)
5040 +{
5041 + unsigned long i;
5042 +
5043 + printk(KERN_ERR "PAX: bytes at PC: ");
5044 + for (i = 0; i < 8; i++) {
5045 + unsigned int c;
5046 + if (get_user(c, (unsigned int *)pc+i))
5047 + printk(KERN_CONT "???????? ");
5048 + else
5049 + printk(KERN_CONT "%08x ", c);
5050 + }
5051 + printk("\n");
5052 +}
5053 +#endif
5054 +
5055 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5056 unsigned long address)
5057 {
5058 @@ -231,6 +495,24 @@ good_area:
5059 if(!(vma->vm_flags & VM_WRITE))
5060 goto bad_area;
5061 } else {
5062 +
5063 +#ifdef CONFIG_PAX_PAGEEXEC
5064 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5065 + up_read(&mm->mmap_sem);
5066 + switch (pax_handle_fetch_fault(regs)) {
5067 +
5068 +#ifdef CONFIG_PAX_EMUPLT
5069 + case 2:
5070 + case 3:
5071 + return;
5072 +#endif
5073 +
5074 + }
5075 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5076 + do_group_exit(SIGKILL);
5077 + }
5078 +#endif
5079 +
5080 /* Allow reads even for write-only mappings */
5081 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5082 goto bad_area;
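
Note: with CONFIG_PAX_PAGEEXEC an instruction fetch from a non-executable mapping faults, so do_sparc_fault() has to recognise the few code sequences userland legitimately executes out of non-exec memory. pax_handle_fetch_fault() reads the faulting instructions with get_user(), matches them against the known patched and unpatched PLT forms, and emulates the branch by rewriting pc/npc; anything else is reported via pax_report_fault() and the task is killed. The stand-alone sketch below decodes only the sethi/jmpl/nop form (patched PLT emulation #3 above) to show the general shape of the matching; the instruction masks are taken from the hunk, the harness around them is invented.

/* Stand-alone illustration of one PLT-pattern match from
 * pax_handle_fetch_fault(); only pattern #3 (sethi/jmpl/nop) is shown
 * and the register/PC plumbing is reduced to plain variables. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_regs {
        uint32_t pc, npc, g1;
};

static bool emulate_patched_plt3(const uint32_t insn[3], struct sketch_regs *regs)
{
        uint32_t sethi = insn[0], jmpl = insn[1], nop = insn[2];
        uint32_t addr;

        if ((sethi & 0xFFC00000U) != 0x03000000U ||   /* sethi %hi(...), %g1 */
            (jmpl  & 0xFFFFE000U) != 0x81C06000U ||   /* jmpl %g1 + simm13, %g0 */
            nop != 0x01000000U)
                return false;

        addr = (sethi & 0x003FFFFFU) << 10;           /* the %hi() part */
        regs->g1 = addr;
        /* sign-extend the 13-bit immediate exactly as the hunk does */
        addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
        regs->pc  = addr;
        regs->npc = addr + 4;
        return true;
}

int main(void)
{
        /* sethi %hi(0x10000), %g1 ; jmpl %g1 + 0x20, %g0 ; nop  (made-up target) */
        const uint32_t insn[3] = { 0x03000040U, 0x81C06020U, 0x01000000U };
        struct sketch_regs regs = { 0, 0, 0 };

        if (emulate_patched_plt3(insn, &regs))
                printf("redirect to %#x\n", (unsigned)regs.pc);
        return 0;
}
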
5083 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_64.c linux-2.6.32.41/arch/sparc/mm/fault_64.c
5084 --- linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5085 +++ linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5086 @@ -20,6 +20,9 @@
5087 #include <linux/kprobes.h>
5088 #include <linux/kdebug.h>
5089 #include <linux/percpu.h>
5090 +#include <linux/slab.h>
5091 +#include <linux/pagemap.h>
5092 +#include <linux/compiler.h>
5093
5094 #include <asm/page.h>
5095 #include <asm/pgtable.h>
5096 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5097 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5098 regs->tpc);
5099 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5100 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5101 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5102 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5103 dump_stack();
5104 unhandled_fault(regs->tpc, current, regs);
5105 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5106 show_regs(regs);
5107 }
5108
5109 +#ifdef CONFIG_PAX_PAGEEXEC
5110 +#ifdef CONFIG_PAX_DLRESOLVE
5111 +static void pax_emuplt_close(struct vm_area_struct *vma)
5112 +{
5113 + vma->vm_mm->call_dl_resolve = 0UL;
5114 +}
5115 +
5116 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5117 +{
5118 + unsigned int *kaddr;
5119 +
5120 + vmf->page = alloc_page(GFP_HIGHUSER);
5121 + if (!vmf->page)
5122 + return VM_FAULT_OOM;
5123 +
5124 + kaddr = kmap(vmf->page);
5125 + memset(kaddr, 0, PAGE_SIZE);
5126 + kaddr[0] = 0x9DE3BFA8U; /* save */
5127 + flush_dcache_page(vmf->page);
5128 + kunmap(vmf->page);
5129 + return VM_FAULT_MAJOR;
5130 +}
5131 +
5132 +static const struct vm_operations_struct pax_vm_ops = {
5133 + .close = pax_emuplt_close,
5134 + .fault = pax_emuplt_fault
5135 +};
5136 +
5137 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5138 +{
5139 + int ret;
5140 +
5141 + vma->vm_mm = current->mm;
5142 + vma->vm_start = addr;
5143 + vma->vm_end = addr + PAGE_SIZE;
5144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5146 + vma->vm_ops = &pax_vm_ops;
5147 +
5148 + ret = insert_vm_struct(current->mm, vma);
5149 + if (ret)
5150 + return ret;
5151 +
5152 + ++current->mm->total_vm;
5153 + return 0;
5154 +}
5155 +#endif
5156 +
5157 +/*
5158 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5159 + *
5160 + * returns 1 when task should be killed
5161 + * 2 when patched PLT trampoline was detected
5162 + * 3 when unpatched PLT trampoline was detected
5163 + */
5164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5165 +{
5166 +
5167 +#ifdef CONFIG_PAX_EMUPLT
5168 + int err;
5169 +
5170 + do { /* PaX: patched PLT emulation #1 */
5171 + unsigned int sethi1, sethi2, jmpl;
5172 +
5173 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5174 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5175 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5176 +
5177 + if (err)
5178 + break;
5179 +
5180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5183 + {
5184 + unsigned long addr;
5185 +
5186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5187 + addr = regs->u_regs[UREG_G1];
5188 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5189 +
5190 + if (test_thread_flag(TIF_32BIT))
5191 + addr &= 0xFFFFFFFFUL;
5192 +
5193 + regs->tpc = addr;
5194 + regs->tnpc = addr+4;
5195 + return 2;
5196 + }
5197 + } while (0);
5198 +
5199 + { /* PaX: patched PLT emulation #2 */
5200 + unsigned int ba;
5201 +
5202 + err = get_user(ba, (unsigned int *)regs->tpc);
5203 +
5204 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5205 + unsigned long addr;
5206 +
5207 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5208 +
5209 + if (test_thread_flag(TIF_32BIT))
5210 + addr &= 0xFFFFFFFFUL;
5211 +
5212 + regs->tpc = addr;
5213 + regs->tnpc = addr+4;
5214 + return 2;
5215 + }
5216 + }
5217 +
5218 + do { /* PaX: patched PLT emulation #3 */
5219 + unsigned int sethi, jmpl, nop;
5220 +
5221 + err = get_user(sethi, (unsigned int *)regs->tpc);
5222 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5223 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5224 +
5225 + if (err)
5226 + break;
5227 +
5228 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5229 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5230 + nop == 0x01000000U)
5231 + {
5232 + unsigned long addr;
5233 +
5234 + addr = (sethi & 0x003FFFFFU) << 10;
5235 + regs->u_regs[UREG_G1] = addr;
5236 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5237 +
5238 + if (test_thread_flag(TIF_32BIT))
5239 + addr &= 0xFFFFFFFFUL;
5240 +
5241 + regs->tpc = addr;
5242 + regs->tnpc = addr+4;
5243 + return 2;
5244 + }
5245 + } while (0);
5246 +
5247 + do { /* PaX: patched PLT emulation #4 */
5248 + unsigned int sethi, mov1, call, mov2;
5249 +
5250 + err = get_user(sethi, (unsigned int *)regs->tpc);
5251 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5252 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5253 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5254 +
5255 + if (err)
5256 + break;
5257 +
5258 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5259 + mov1 == 0x8210000FU &&
5260 + (call & 0xC0000000U) == 0x40000000U &&
5261 + mov2 == 0x9E100001U)
5262 + {
5263 + unsigned long addr;
5264 +
5265 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5266 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5267 +
5268 + if (test_thread_flag(TIF_32BIT))
5269 + addr &= 0xFFFFFFFFUL;
5270 +
5271 + regs->tpc = addr;
5272 + regs->tnpc = addr+4;
5273 + return 2;
5274 + }
5275 + } while (0);
5276 +
5277 + do { /* PaX: patched PLT emulation #5 */
5278 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5279 +
5280 + err = get_user(sethi, (unsigned int *)regs->tpc);
5281 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5282 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5283 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5284 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5285 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5286 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5287 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5288 +
5289 + if (err)
5290 + break;
5291 +
5292 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5293 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5294 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5295 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5296 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5297 + sllx == 0x83287020U &&
5298 + jmpl == 0x81C04005U &&
5299 + nop == 0x01000000U)
5300 + {
5301 + unsigned long addr;
5302 +
5303 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5304 + regs->u_regs[UREG_G1] <<= 32;
5305 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5306 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5307 + regs->tpc = addr;
5308 + regs->tnpc = addr+4;
5309 + return 2;
5310 + }
5311 + } while (0);
5312 +
5313 + do { /* PaX: patched PLT emulation #6 */
5314 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5315 +
5316 + err = get_user(sethi, (unsigned int *)regs->tpc);
5317 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5318 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5319 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5320 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5321 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5322 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5323 +
5324 + if (err)
5325 + break;
5326 +
5327 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5328 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5329 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5330 + sllx == 0x83287020U &&
5331 + (or & 0xFFFFE000U) == 0x8A116000U &&
5332 + jmpl == 0x81C04005U &&
5333 + nop == 0x01000000U)
5334 + {
5335 + unsigned long addr;
5336 +
5337 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5338 + regs->u_regs[UREG_G1] <<= 32;
5339 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5340 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5341 + regs->tpc = addr;
5342 + regs->tnpc = addr+4;
5343 + return 2;
5344 + }
5345 + } while (0);
5346 +
5347 + do { /* PaX: unpatched PLT emulation step 1 */
5348 + unsigned int sethi, ba, nop;
5349 +
5350 + err = get_user(sethi, (unsigned int *)regs->tpc);
5351 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5352 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5353 +
5354 + if (err)
5355 + break;
5356 +
5357 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5358 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5359 + nop == 0x01000000U)
5360 + {
5361 + unsigned long addr;
5362 + unsigned int save, call;
5363 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5364 +
5365 + if ((ba & 0xFFC00000U) == 0x30800000U)
5366 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5367 + else
5368 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5369 +
5370 + if (test_thread_flag(TIF_32BIT))
5371 + addr &= 0xFFFFFFFFUL;
5372 +
5373 + err = get_user(save, (unsigned int *)addr);
5374 + err |= get_user(call, (unsigned int *)(addr+4));
5375 + err |= get_user(nop, (unsigned int *)(addr+8));
5376 + if (err)
5377 + break;
5378 +
5379 +#ifdef CONFIG_PAX_DLRESOLVE
5380 + if (save == 0x9DE3BFA8U &&
5381 + (call & 0xC0000000U) == 0x40000000U &&
5382 + nop == 0x01000000U)
5383 + {
5384 + struct vm_area_struct *vma;
5385 + unsigned long call_dl_resolve;
5386 +
5387 + down_read(&current->mm->mmap_sem);
5388 + call_dl_resolve = current->mm->call_dl_resolve;
5389 + up_read(&current->mm->mmap_sem);
5390 + if (likely(call_dl_resolve))
5391 + goto emulate;
5392 +
5393 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5394 +
5395 + down_write(&current->mm->mmap_sem);
5396 + if (current->mm->call_dl_resolve) {
5397 + call_dl_resolve = current->mm->call_dl_resolve;
5398 + up_write(&current->mm->mmap_sem);
5399 + if (vma)
5400 + kmem_cache_free(vm_area_cachep, vma);
5401 + goto emulate;
5402 + }
5403 +
5404 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5405 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5406 + up_write(&current->mm->mmap_sem);
5407 + if (vma)
5408 + kmem_cache_free(vm_area_cachep, vma);
5409 + return 1;
5410 + }
5411 +
5412 + if (pax_insert_vma(vma, call_dl_resolve)) {
5413 + up_write(&current->mm->mmap_sem);
5414 + kmem_cache_free(vm_area_cachep, vma);
5415 + return 1;
5416 + }
5417 +
5418 + current->mm->call_dl_resolve = call_dl_resolve;
5419 + up_write(&current->mm->mmap_sem);
5420 +
5421 +emulate:
5422 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5423 + regs->tpc = call_dl_resolve;
5424 + regs->tnpc = addr+4;
5425 + return 3;
5426 + }
5427 +#endif
5428 +
5429 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5430 + if ((save & 0xFFC00000U) == 0x05000000U &&
5431 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5432 + nop == 0x01000000U)
5433 + {
5434 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5435 + regs->u_regs[UREG_G2] = addr + 4;
5436 + addr = (save & 0x003FFFFFU) << 10;
5437 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5438 +
5439 + if (test_thread_flag(TIF_32BIT))
5440 + addr &= 0xFFFFFFFFUL;
5441 +
5442 + regs->tpc = addr;
5443 + regs->tnpc = addr+4;
5444 + return 3;
5445 + }
5446 +
5447 + /* PaX: 64-bit PLT stub */
5448 + err = get_user(sethi1, (unsigned int *)addr);
5449 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5450 + err |= get_user(or1, (unsigned int *)(addr+8));
5451 + err |= get_user(or2, (unsigned int *)(addr+12));
5452 + err |= get_user(sllx, (unsigned int *)(addr+16));
5453 + err |= get_user(add, (unsigned int *)(addr+20));
5454 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5455 + err |= get_user(nop, (unsigned int *)(addr+28));
5456 + if (err)
5457 + break;
5458 +
5459 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5460 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5461 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5462 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5463 + sllx == 0x89293020U &&
5464 + add == 0x8A010005U &&
5465 + jmpl == 0x89C14000U &&
5466 + nop == 0x01000000U)
5467 + {
5468 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5469 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5470 + regs->u_regs[UREG_G4] <<= 32;
5471 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5472 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5473 + regs->u_regs[UREG_G4] = addr + 24;
5474 + addr = regs->u_regs[UREG_G5];
5475 + regs->tpc = addr;
5476 + regs->tnpc = addr+4;
5477 + return 3;
5478 + }
5479 + }
5480 + } while (0);
5481 +
5482 +#ifdef CONFIG_PAX_DLRESOLVE
5483 + do { /* PaX: unpatched PLT emulation step 2 */
5484 + unsigned int save, call, nop;
5485 +
5486 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5487 + err |= get_user(call, (unsigned int *)regs->tpc);
5488 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5489 + if (err)
5490 + break;
5491 +
5492 + if (save == 0x9DE3BFA8U &&
5493 + (call & 0xC0000000U) == 0x40000000U &&
5494 + nop == 0x01000000U)
5495 + {
5496 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5497 +
5498 + if (test_thread_flag(TIF_32BIT))
5499 + dl_resolve &= 0xFFFFFFFFUL;
5500 +
5501 + regs->u_regs[UREG_RETPC] = regs->tpc;
5502 + regs->tpc = dl_resolve;
5503 + regs->tnpc = dl_resolve+4;
5504 + return 3;
5505 + }
5506 + } while (0);
5507 +#endif
5508 +
5509 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5510 + unsigned int sethi, ba, nop;
5511 +
5512 + err = get_user(sethi, (unsigned int *)regs->tpc);
5513 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5514 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5515 +
5516 + if (err)
5517 + break;
5518 +
5519 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5520 + (ba & 0xFFF00000U) == 0x30600000U &&
5521 + nop == 0x01000000U)
5522 + {
5523 + unsigned long addr;
5524 +
5525 + addr = (sethi & 0x003FFFFFU) << 10;
5526 + regs->u_regs[UREG_G1] = addr;
5527 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5528 +
5529 + if (test_thread_flag(TIF_32BIT))
5530 + addr &= 0xFFFFFFFFUL;
5531 +
5532 + regs->tpc = addr;
5533 + regs->tnpc = addr+4;
5534 + return 2;
5535 + }
5536 + } while (0);
5537 +
5538 +#endif
5539 +
5540 + return 1;
5541 +}
5542 +
5543 +void pax_report_insns(void *pc, void *sp)
5544 +{
5545 + unsigned long i;
5546 +
5547 + printk(KERN_ERR "PAX: bytes at PC: ");
5548 + for (i = 0; i < 8; i++) {
5549 + unsigned int c;
5550 + if (get_user(c, (unsigned int *)pc+i))
5551 + printk(KERN_CONT "???????? ");
5552 + else
5553 + printk(KERN_CONT "%08x ", c);
5554 + }
5555 + printk("\n");
5556 +}
5557 +#endif
5558 +
5559 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5560 {
5561 struct mm_struct *mm = current->mm;
5562 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5563 if (!vma)
5564 goto bad_area;
5565
5566 +#ifdef CONFIG_PAX_PAGEEXEC
5567 + /* PaX: detect ITLB misses on non-exec pages */
5568 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5569 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5570 + {
5571 + if (address != regs->tpc)
5572 + goto good_area;
5573 +
5574 + up_read(&mm->mmap_sem);
5575 + switch (pax_handle_fetch_fault(regs)) {
5576 +
5577 +#ifdef CONFIG_PAX_EMUPLT
5578 + case 2:
5579 + case 3:
5580 + return;
5581 +#endif
5582 +
5583 + }
5584 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5585 + do_group_exit(SIGKILL);
5586 + }
5587 +#endif
5588 +
5589 /* Pure DTLB misses do not tell us whether the fault causing
5590 * load/store/atomic was a write or not, it only says that there
5591 * was no match. So in such a case we (carefully) read the
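
Note: the sparc64 variant hooks do_sparc64_fault() instead and is gated on an ITLB miss (FAULT_CODE_ITLB) hitting a VMA without VM_EXEC; beyond that, the pattern matching mirrors the 32-bit code, with one extra detail that is easy to miss: every emulated target is masked to 32 bits for TIF_32BIT (compat) tasks before being written to tpc/tnpc. A minimal sketch of that masking step, with the thread-flag query reduced to a plain boolean:

/* Illustration of the compat-task address masking applied to every
 * emulated PLT target on sparc64; is_32bit_task stands in for
 * test_thread_flag(TIF_32BIT). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_emulated_target(uint64_t addr, bool is_32bit_task)
{
        if (is_32bit_task)
                addr &= 0xFFFFFFFFULL;   /* compat tasks live in the low 4 GiB */
        return addr;
}

int main(void)
{
        uint64_t target = 0xFFFFF80100401000ULL;   /* made-up 64-bit value */

        printf("%#llx -> %#llx\n",
               (unsigned long long)target,
               (unsigned long long)clamp_emulated_target(target, true));
        return 0;
}
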
5592 diff -urNp linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c
5593 --- linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5594 +++ linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5595 @@ -69,7 +69,7 @@ full_search:
5596 }
5597 return -ENOMEM;
5598 }
5599 - if (likely(!vma || addr + len <= vma->vm_start)) {
5600 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5601 /*
5602 * Remember the place where we stopped the search:
5603 */
5604 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5605 /* make sure it can fit in the remaining address space */
5606 if (likely(addr > len)) {
5607 vma = find_vma(mm, addr-len);
5608 - if (!vma || addr <= vma->vm_start) {
5609 + if (check_heap_stack_gap(vma, addr - len, len)) {
5610 /* remember the address as a hint for next time */
5611 return (mm->free_area_cache = addr-len);
5612 }
5613 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5614 if (unlikely(mm->mmap_base < len))
5615 goto bottomup;
5616
5617 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5618 + addr = mm->mmap_base - len;
5619
5620 do {
5621 + addr &= HPAGE_MASK;
5622 /*
5623 * Lookup failure means no vma is above this address,
5624 * else if new region fits below vma->vm_start,
5625 * return with success:
5626 */
5627 vma = find_vma(mm, addr);
5628 - if (likely(!vma || addr+len <= vma->vm_start)) {
5629 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5630 /* remember the address as a hint for next time */
5631 return (mm->free_area_cache = addr);
5632 }
5633 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5634 mm->cached_hole_size = vma->vm_start - addr;
5635
5636 /* try just below the current vma->vm_start */
5637 - addr = (vma->vm_start-len) & HPAGE_MASK;
5638 - } while (likely(len < vma->vm_start));
5639 + addr = skip_heap_stack_gap(vma, len);
5640 + } while (!IS_ERR_VALUE(addr));
5641
5642 bottomup:
5643 /*
5644 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5645 if (addr) {
5646 addr = ALIGN(addr, HPAGE_SIZE);
5647 vma = find_vma(mm, addr);
5648 - if (task_size - len >= addr &&
5649 - (!vma || addr + len <= vma->vm_start))
5650 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5651 return addr;
5652 }
5653 if (mm->get_unmapped_area == arch_get_unmapped_area)
5654 diff -urNp linux-2.6.32.41/arch/sparc/mm/init_32.c linux-2.6.32.41/arch/sparc/mm/init_32.c
5655 --- linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5656 +++ linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5657 @@ -317,6 +317,9 @@ extern void device_scan(void);
5658 pgprot_t PAGE_SHARED __read_mostly;
5659 EXPORT_SYMBOL(PAGE_SHARED);
5660
5661 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5662 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5663 +
5664 void __init paging_init(void)
5665 {
5666 switch(sparc_cpu_model) {
5667 @@ -345,17 +348,17 @@ void __init paging_init(void)
5668
5669 /* Initialize the protection map with non-constant, MMU dependent values. */
5670 protection_map[0] = PAGE_NONE;
5671 - protection_map[1] = PAGE_READONLY;
5672 - protection_map[2] = PAGE_COPY;
5673 - protection_map[3] = PAGE_COPY;
5674 + protection_map[1] = PAGE_READONLY_NOEXEC;
5675 + protection_map[2] = PAGE_COPY_NOEXEC;
5676 + protection_map[3] = PAGE_COPY_NOEXEC;
5677 protection_map[4] = PAGE_READONLY;
5678 protection_map[5] = PAGE_READONLY;
5679 protection_map[6] = PAGE_COPY;
5680 protection_map[7] = PAGE_COPY;
5681 protection_map[8] = PAGE_NONE;
5682 - protection_map[9] = PAGE_READONLY;
5683 - protection_map[10] = PAGE_SHARED;
5684 - protection_map[11] = PAGE_SHARED;
5685 + protection_map[9] = PAGE_READONLY_NOEXEC;
5686 + protection_map[10] = PAGE_SHARED_NOEXEC;
5687 + protection_map[11] = PAGE_SHARED_NOEXEC;
5688 protection_map[12] = PAGE_READONLY;
5689 protection_map[13] = PAGE_READONLY;
5690 protection_map[14] = PAGE_SHARED;
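
Note: the protection_map rewrite is what makes PAGEEXEC take effect on sparc32: slots 1-3 (private PROT_READ/PROT_WRITE combinations without PROT_EXEC) and 9-11 (their shared counterparts) now point at the *_NOEXEC pgprots added earlier in the patch, while the exec-including slots 4-7 and 12-15 are left alone. The slot index is simply the low four vm_flags bits; the sketch below shows which slot a given mapping lands in, using the generic VM_* bit values as an assumption.

/* Sketch: which protection_map[] slot a set of vm_flags selects.
 * Bit values mirror the generic kernel definitions (VM_READ=1,
 * VM_WRITE=2, VM_EXEC=4, VM_SHARED=8). */
#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static unsigned int protection_map_index(unsigned long vm_flags)
{
        return (unsigned int)(vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED));
}

int main(void)
{
        /* A private PROT_READ|PROT_WRITE mapping selects slot 3, i.e.
         * PAGE_COPY_NOEXEC under PAGEEXEC; adding PROT_EXEC moves it to
         * slot 7 (plain PAGE_COPY). */
        printf("rw- private -> slot %u\n", protection_map_index(VM_READ | VM_WRITE));
        printf("rwx private -> slot %u\n", protection_map_index(VM_READ | VM_WRITE | VM_EXEC));
        printf("rw- shared  -> slot %u\n", protection_map_index(VM_READ | VM_WRITE | VM_SHARED));
        return 0;
}
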
5691 diff -urNp linux-2.6.32.41/arch/sparc/mm/Makefile linux-2.6.32.41/arch/sparc/mm/Makefile
5692 --- linux-2.6.32.41/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5693 +++ linux-2.6.32.41/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5694 @@ -2,7 +2,7 @@
5695 #
5696
5697 asflags-y := -ansi
5698 -ccflags-y := -Werror
5699 +#ccflags-y := -Werror
5700
5701 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5702 obj-y += fault_$(BITS).o
5703 diff -urNp linux-2.6.32.41/arch/sparc/mm/srmmu.c linux-2.6.32.41/arch/sparc/mm/srmmu.c
5704 --- linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5705 +++ linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5706 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5707 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5708 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5709 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5710 +
5711 +#ifdef CONFIG_PAX_PAGEEXEC
5712 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5713 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5714 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5715 +#endif
5716 +
5717 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5718 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5719
5720 diff -urNp linux-2.6.32.41/arch/um/include/asm/kmap_types.h linux-2.6.32.41/arch/um/include/asm/kmap_types.h
5721 --- linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5722 +++ linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5723 @@ -23,6 +23,7 @@ enum km_type {
5724 KM_IRQ1,
5725 KM_SOFTIRQ0,
5726 KM_SOFTIRQ1,
5727 + KM_CLEARPAGE,
5728 KM_TYPE_NR
5729 };
5730
5731 diff -urNp linux-2.6.32.41/arch/um/include/asm/page.h linux-2.6.32.41/arch/um/include/asm/page.h
5732 --- linux-2.6.32.41/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5733 +++ linux-2.6.32.41/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5734 @@ -14,6 +14,9 @@
5735 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5736 #define PAGE_MASK (~(PAGE_SIZE-1))
5737
5738 +#define ktla_ktva(addr) (addr)
5739 +#define ktva_ktla(addr) (addr)
5740 +
5741 #ifndef __ASSEMBLY__
5742
5743 struct page;
5744 diff -urNp linux-2.6.32.41/arch/um/kernel/process.c linux-2.6.32.41/arch/um/kernel/process.c
5745 --- linux-2.6.32.41/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5746 +++ linux-2.6.32.41/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5747 @@ -393,22 +393,6 @@ int singlestepping(void * t)
5748 return 2;
5749 }
5750
5751 -/*
5752 - * Only x86 and x86_64 have an arch_align_stack().
5753 - * All other arches have "#define arch_align_stack(x) (x)"
5754 - * in their asm/system.h
5755 - * As this is included in UML from asm-um/system-generic.h,
5756 - * we can use it to behave as the subarch does.
5757 - */
5758 -#ifndef arch_align_stack
5759 -unsigned long arch_align_stack(unsigned long sp)
5760 -{
5761 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5762 - sp -= get_random_int() % 8192;
5763 - return sp & ~0xf;
5764 -}
5765 -#endif
5766 -
5767 unsigned long get_wchan(struct task_struct *p)
5768 {
5769 unsigned long stack_page, sp, ip;
5770 diff -urNp linux-2.6.32.41/arch/um/sys-i386/syscalls.c linux-2.6.32.41/arch/um/sys-i386/syscalls.c
5771 --- linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5772 +++ linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5773 @@ -11,6 +11,21 @@
5774 #include "asm/uaccess.h"
5775 #include "asm/unistd.h"
5776
5777 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5778 +{
5779 + unsigned long pax_task_size = TASK_SIZE;
5780 +
5781 +#ifdef CONFIG_PAX_SEGMEXEC
5782 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5783 + pax_task_size = SEGMEXEC_TASK_SIZE;
5784 +#endif
5785 +
5786 + if (len > pax_task_size || addr > pax_task_size - len)
5787 + return -EINVAL;
5788 +
5789 + return 0;
5790 +}
5791 +
5792 /*
5793 * Perform the select(nd, in, out, ex, tv) and mmap() system
5794 * calls. Linux/i386 didn't use to be able to handle more than
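
Note: i386_mmap_check() gives UML the same mmap range validation the patch adds on native i386: a request is rejected when its length exceeds the task size or when its end would fall beyond it, and under CONFIG_PAX_SEGMEXEC the reduced SEGMEXEC_TASK_SIZE is used because SEGMEXEC splits the user address space in half. Writing the test as "addr > pax_task_size - len" after the "len > pax_task_size" guard keeps the comparison free of overflow; below is a small stand-alone check of the boundary cases, with an assumed 3 GiB task size.

/* Boundary check of the mmap range test used by i386_mmap_check();
 * the 3 GiB task size here is just an illustrative value. */
#include <stdio.h>

#define SKETCH_TASK_SIZE 0xC0000000UL   /* assumed 3 GiB user split */

static int mmap_range_ok(unsigned long addr, unsigned long len)
{
        if (len > SKETCH_TASK_SIZE || addr > SKETCH_TASK_SIZE - len)
                return 0;               /* -EINVAL in the real helper */
        return 1;
}

int main(void)
{
        printf("%d\n", mmap_range_ok(0xBFFFF000UL, 0x1000UL));    /* ends exactly at the limit: ok */
        printf("%d\n", mmap_range_ok(0xBFFFF000UL, 0x2000UL));    /* crosses the limit: rejected */
        printf("%d\n", mmap_range_ok(0x10000UL, 0xC0001000UL));   /* longer than the task size: rejected */
        return 0;
}
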
5795 diff -urNp linux-2.6.32.41/arch/x86/boot/bitops.h linux-2.6.32.41/arch/x86/boot/bitops.h
5796 --- linux-2.6.32.41/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5797 +++ linux-2.6.32.41/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5798 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5799 u8 v;
5800 const u32 *p = (const u32 *)addr;
5801
5802 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5803 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5804 return v;
5805 }
5806
5807 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5808
5809 static inline void set_bit(int nr, void *addr)
5810 {
5811 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5812 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5813 }
5814
5815 #endif /* BOOT_BITOPS_H */
5816 diff -urNp linux-2.6.32.41/arch/x86/boot/boot.h linux-2.6.32.41/arch/x86/boot/boot.h
5817 --- linux-2.6.32.41/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5818 +++ linux-2.6.32.41/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5819 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5820 static inline u16 ds(void)
5821 {
5822 u16 seg;
5823 - asm("movw %%ds,%0" : "=rm" (seg));
5824 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5825 return seg;
5826 }
5827
5828 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5829 static inline int memcmp(const void *s1, const void *s2, size_t len)
5830 {
5831 u8 diff;
5832 - asm("repe; cmpsb; setnz %0"
5833 + asm volatile("repe; cmpsb; setnz %0"
5834 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5835 return diff;
5836 }
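
Note: the bitops.h and boot.h hunks all add volatile to inline asm statements whose outputs are registers. Without it, GCC may merge two identical asm statements with the same inputs or drop one whose result goes unused, which is wrong here because the asm reads machine state (the %ds selector, flags produced by btl/cmpsb) that can change between calls in ways the constraints do not express. A minimal illustration of the difference around the ds() helper; compile on x86 with -O2 to compare the generated code.

/* Why `volatile` matters for these asms: without it, two calls with the
 * same (apparent) inputs may be merged and the second read of %ds would
 * never happen. */
#include <stdio.h>

static inline unsigned short read_ds_nonvolatile(void)
{
        unsigned short seg;
        asm("movw %%ds,%0" : "=rm" (seg));          /* may be CSE'd or dropped */
        return seg;
}

static inline unsigned short read_ds_volatile(void)
{
        unsigned short seg;
        asm volatile("movw %%ds,%0" : "=rm" (seg)); /* always executes */
        return seg;
}

int main(void)
{
        /* With the non-volatile version the compiler may legally fold this
         * comparison to 0 at compile time; the volatile version performs
         * two real reads of %ds. */
        printf("ds changed between calls: %d\n",
               read_ds_volatile() != read_ds_volatile());
        return 0;
}
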
5837 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_32.S linux-2.6.32.41/arch/x86/boot/compressed/head_32.S
5838 --- linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5839 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5840 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5841 notl %eax
5842 andl %eax, %ebx
5843 #else
5844 - movl $LOAD_PHYSICAL_ADDR, %ebx
5845 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5846 #endif
5847
5848 /* Target address to relocate to for decompression */
5849 @@ -149,7 +149,7 @@ relocated:
5850 * and where it was actually loaded.
5851 */
5852 movl %ebp, %ebx
5853 - subl $LOAD_PHYSICAL_ADDR, %ebx
5854 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5855 jz 2f /* Nothing to be done if loaded at compiled addr. */
5856 /*
5857 * Process relocations.
5858 @@ -157,8 +157,7 @@ relocated:
5859
5860 1: subl $4, %edi
5861 movl (%edi), %ecx
5862 - testl %ecx, %ecx
5863 - jz 2f
5864 + jecxz 2f
5865 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5866 jmp 1b
5867 2:
5868 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_64.S linux-2.6.32.41/arch/x86/boot/compressed/head_64.S
5869 --- linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
5870 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
5871 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5872 notl %eax
5873 andl %eax, %ebx
5874 #else
5875 - movl $LOAD_PHYSICAL_ADDR, %ebx
5876 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5877 #endif
5878
5879 /* Target address to relocate to for decompression */
5880 @@ -234,7 +234,7 @@ ENTRY(startup_64)
5881 notq %rax
5882 andq %rax, %rbp
5883 #else
5884 - movq $LOAD_PHYSICAL_ADDR, %rbp
5885 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5886 #endif
5887
5888 /* Target address to relocate to for decompression */
5889 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/misc.c linux-2.6.32.41/arch/x86/boot/compressed/misc.c
5890 --- linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
5891 +++ linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
5892 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
5893 case PT_LOAD:
5894 #ifdef CONFIG_RELOCATABLE
5895 dest = output;
5896 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5897 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5898 #else
5899 dest = (void *)(phdr->p_paddr);
5900 #endif
5901 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
5902 error("Destination address too large");
5903 #endif
5904 #ifndef CONFIG_RELOCATABLE
5905 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5906 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5907 error("Wrong destination address");
5908 #endif
5909
5910 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c
5911 --- linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
5912 +++ linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
5913 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5914
5915 offs = (olen > ilen) ? olen - ilen : 0;
5916 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5917 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5918 + offs += 64*1024; /* Add 64K bytes slack */
5919 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5920
5921 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
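
Note: mkpiggy computes how much headroom the in-place decompressor needs beyond the compressed payload; this hunk simply grows the safety slack from 32K + 18 bytes to a flat 64K. A worked example of the resulting arithmetic, with made-up input and output sizes:

/* Worked example of the mkpiggy offset computation with the patch's
 * 64K slack; olen/ilen values are illustrative only. */
#include <stdio.h>

int main(void)
{
        unsigned long olen = 14UL << 20;   /* uncompressed size: 14 MiB (made up) */
        unsigned long ilen =  6UL << 20;   /* compressed size:    6 MiB (made up) */
        unsigned long offs;

        offs  = (olen > ilen) ? olen - ilen : 0;   /* worst-case in-place expansion */
        offs += olen >> 12;                        /* 8 bytes for each 32 KiB block */
        offs += 64 * 1024;                         /* flat 64 KiB slack (was 32K + 18) */
        offs  = (offs + 4095) & ~4095UL;           /* round up to a 4 KiB boundary */

        printf("extra offset: %lu bytes (%lu KiB)\n", offs, offs >> 10);
        return 0;
}
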
5922 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/relocs.c linux-2.6.32.41/arch/x86/boot/compressed/relocs.c
5923 --- linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
5924 +++ linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
5925 @@ -10,8 +10,11 @@
5926 #define USE_BSD
5927 #include <endian.h>
5928
5929 +#include "../../../../include/linux/autoconf.h"
5930 +
5931 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5932 static Elf32_Ehdr ehdr;
5933 +static Elf32_Phdr *phdr;
5934 static unsigned long reloc_count, reloc_idx;
5935 static unsigned long *relocs;
5936
5937 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
5938
5939 static int is_safe_abs_reloc(const char* sym_name)
5940 {
5941 - int i;
5942 + unsigned int i;
5943
5944 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
5945 if (!strcmp(sym_name, safe_abs_relocs[i]))
5946 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
5947 }
5948 }
5949
5950 +static void read_phdrs(FILE *fp)
5951 +{
5952 + unsigned int i;
5953 +
5954 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5955 + if (!phdr) {
5956 + die("Unable to allocate %d program headers\n",
5957 + ehdr.e_phnum);
5958 + }
5959 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5960 + die("Seek to %d failed: %s\n",
5961 + ehdr.e_phoff, strerror(errno));
5962 + }
5963 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5964 + die("Cannot read ELF program headers: %s\n",
5965 + strerror(errno));
5966 + }
5967 + for(i = 0; i < ehdr.e_phnum; i++) {
5968 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5969 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5970 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5971 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5972 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5973 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5974 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5975 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5976 + }
5977 +
5978 +}
5979 +
5980 static void read_shdrs(FILE *fp)
5981 {
5982 - int i;
5983 + unsigned int i;
5984 Elf32_Shdr shdr;
5985
5986 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5987 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
5988
5989 static void read_strtabs(FILE *fp)
5990 {
5991 - int i;
5992 + unsigned int i;
5993 for (i = 0; i < ehdr.e_shnum; i++) {
5994 struct section *sec = &secs[i];
5995 if (sec->shdr.sh_type != SHT_STRTAB) {
5996 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
5997
5998 static void read_symtabs(FILE *fp)
5999 {
6000 - int i,j;
6001 + unsigned int i,j;
6002 for (i = 0; i < ehdr.e_shnum; i++) {
6003 struct section *sec = &secs[i];
6004 if (sec->shdr.sh_type != SHT_SYMTAB) {
6005 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6006
6007 static void read_relocs(FILE *fp)
6008 {
6009 - int i,j;
6010 + unsigned int i,j;
6011 + uint32_t base;
6012 +
6013 for (i = 0; i < ehdr.e_shnum; i++) {
6014 struct section *sec = &secs[i];
6015 if (sec->shdr.sh_type != SHT_REL) {
6016 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6017 die("Cannot read symbol table: %s\n",
6018 strerror(errno));
6019 }
6020 + base = 0;
6021 + for (j = 0; j < ehdr.e_phnum; j++) {
6022 + if (phdr[j].p_type != PT_LOAD )
6023 + continue;
6024 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6025 + continue;
6026 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6027 + break;
6028 + }
6029 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6030 Elf32_Rel *rel = &sec->reltab[j];
6031 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6032 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6033 rel->r_info = elf32_to_cpu(rel->r_info);
6034 }
6035 }
6036 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6037
6038 static void print_absolute_symbols(void)
6039 {
6040 - int i;
6041 + unsigned int i;
6042 printf("Absolute symbols\n");
6043 printf(" Num: Value Size Type Bind Visibility Name\n");
6044 for (i = 0; i < ehdr.e_shnum; i++) {
6045 struct section *sec = &secs[i];
6046 char *sym_strtab;
6047 Elf32_Sym *sh_symtab;
6048 - int j;
6049 + unsigned int j;
6050
6051 if (sec->shdr.sh_type != SHT_SYMTAB) {
6052 continue;
6053 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6054
6055 static void print_absolute_relocs(void)
6056 {
6057 - int i, printed = 0;
6058 + unsigned int i, printed = 0;
6059
6060 for (i = 0; i < ehdr.e_shnum; i++) {
6061 struct section *sec = &secs[i];
6062 struct section *sec_applies, *sec_symtab;
6063 char *sym_strtab;
6064 Elf32_Sym *sh_symtab;
6065 - int j;
6066 + unsigned int j;
6067 if (sec->shdr.sh_type != SHT_REL) {
6068 continue;
6069 }
6070 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6071
6072 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6073 {
6074 - int i;
6075 + unsigned int i;
6076 /* Walk through the relocations */
6077 for (i = 0; i < ehdr.e_shnum; i++) {
6078 char *sym_strtab;
6079 Elf32_Sym *sh_symtab;
6080 struct section *sec_applies, *sec_symtab;
6081 - int j;
6082 + unsigned int j;
6083 struct section *sec = &secs[i];
6084
6085 if (sec->shdr.sh_type != SHT_REL) {
6086 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6087 if (sym->st_shndx == SHN_ABS) {
6088 continue;
6089 }
6090 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6091 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6092 + continue;
6093 +
6094 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6095 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6096 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6097 + continue;
6098 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6099 + continue;
6100 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6101 + continue;
6102 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6103 + continue;
6104 +#endif
6105 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6106 /*
6107 * NONE can be ignored and and PC relative
6108 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6109
6110 static void emit_relocs(int as_text)
6111 {
6112 - int i;
6113 + unsigned int i;
6114 /* Count how many relocations I have and allocate space for them. */
6115 reloc_count = 0;
6116 walk_relocs(count_reloc);
6117 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6118 fname, strerror(errno));
6119 }
6120 read_ehdr(fp);
6121 + read_phdrs(fp);
6122 read_shdrs(fp);
6123 read_strtabs(fp);
6124 read_symtabs(fp);
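
The relocs.c changes read the ELF program headers so that each REL section's relocation offsets can be biased by the load bias of the PT_LOAD segment containing it, i.e. CONFIG_PAGE_OFFSET plus the physical-minus-virtual address delta. Below is a minimal sketch of that lookup, assuming the Elf32 types from <elf.h>; PAGE_OFFSET, reloc_base and the sample headers in main are illustrative stand-ins:

    /* Sketch of the lookup added to relocs.c: find the PT_LOAD segment whose
     * file image contains a section and derive the bias added to that
     * section's relocation offsets. */
    #include <elf.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0xC0000000u   /* assumed i386 default for CONFIG_PAGE_OFFSET */

    static uint32_t reloc_base(const Elf32_Shdr *shdr,
                               const Elf32_Phdr *phdr, size_t phnum)
    {
        for (size_t i = 0; i < phnum; i++) {
            if (phdr[i].p_type != PT_LOAD)
                continue;
            /* The section must sit inside this segment's file image. */
            if (shdr->sh_offset < phdr[i].p_offset ||
                shdr->sh_offset >= phdr[i].p_offset + phdr[i].p_filesz)
                continue;
            /* Virtual-to-physical bias applied to each r_offset. */
            return PAGE_OFFSET + phdr[i].p_paddr - phdr[i].p_vaddr;
        }
        return 0;   /* no containing PT_LOAD segment: leave offsets unbiased */
    }

    int main(void)
    {
        Elf32_Phdr load = { .p_type = PT_LOAD, .p_offset = 0x1000,
                            .p_filesz = 0x200000, .p_vaddr = 0xc1000000,
                            .p_paddr = 0x02000000 };
        Elf32_Shdr text = { .sh_offset = 0x2000 };

        printf("relocation base: %#x\n", reloc_base(&text, &load, 1));
        return 0;
    }
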
6125 diff -urNp linux-2.6.32.41/arch/x86/boot/cpucheck.c linux-2.6.32.41/arch/x86/boot/cpucheck.c
6126 --- linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6127 +++ linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6128 @@ -74,7 +74,7 @@ static int has_fpu(void)
6129 u16 fcw = -1, fsw = -1;
6130 u32 cr0;
6131
6132 - asm("movl %%cr0,%0" : "=r" (cr0));
6133 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6134 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6135 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6136 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6137 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6138 {
6139 u32 f0, f1;
6140
6141 - asm("pushfl ; "
6142 + asm volatile("pushfl ; "
6143 "pushfl ; "
6144 "popl %0 ; "
6145 "movl %0,%1 ; "
6146 @@ -115,7 +115,7 @@ static void get_flags(void)
6147 set_bit(X86_FEATURE_FPU, cpu.flags);
6148
6149 if (has_eflag(X86_EFLAGS_ID)) {
6150 - asm("cpuid"
6151 + asm volatile("cpuid"
6152 : "=a" (max_intel_level),
6153 "=b" (cpu_vendor[0]),
6154 "=d" (cpu_vendor[1]),
6155 @@ -124,7 +124,7 @@ static void get_flags(void)
6156
6157 if (max_intel_level >= 0x00000001 &&
6158 max_intel_level <= 0x0000ffff) {
6159 - asm("cpuid"
6160 + asm volatile("cpuid"
6161 : "=a" (tfms),
6162 "=c" (cpu.flags[4]),
6163 "=d" (cpu.flags[0])
6164 @@ -136,7 +136,7 @@ static void get_flags(void)
6165 cpu.model += ((tfms >> 16) & 0xf) << 4;
6166 }
6167
6168 - asm("cpuid"
6169 + asm volatile("cpuid"
6170 : "=a" (max_amd_level)
6171 : "a" (0x80000000)
6172 : "ebx", "ecx", "edx");
6173 @@ -144,7 +144,7 @@ static void get_flags(void)
6174 if (max_amd_level >= 0x80000001 &&
6175 max_amd_level <= 0x8000ffff) {
6176 u32 eax = 0x80000001;
6177 - asm("cpuid"
6178 + asm volatile("cpuid"
6179 : "+a" (eax),
6180 "=c" (cpu.flags[6]),
6181 "=d" (cpu.flags[1])
6182 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6183 u32 ecx = MSR_K7_HWCR;
6184 u32 eax, edx;
6185
6186 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6187 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6188 eax &= ~(1 << 15);
6189 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6190 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6191
6192 get_flags(); /* Make sure it really did something */
6193 err = check_flags();
6194 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6195 u32 ecx = MSR_VIA_FCR;
6196 u32 eax, edx;
6197
6198 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6199 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6200 eax |= (1<<1)|(1<<7);
6201 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6202 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6203
6204 set_bit(X86_FEATURE_CX8, cpu.flags);
6205 err = check_flags();
6206 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6207 u32 eax, edx;
6208 u32 level = 1;
6209
6210 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6211 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6212 - asm("cpuid"
6213 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6214 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6215 + asm volatile("cpuid"
6216 : "+a" (level), "=d" (cpu.flags[0])
6217 : : "ecx", "ebx");
6218 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6219 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6220
6221 err = check_flags();
6222 }
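
The cpucheck.c hunks add the volatile qualifier to asm statements whose work (cpuid, rdmsr/wrmsr) must not be reordered or dropped even when the compiler decides their outputs are unused. A minimal userspace illustration of the qualifier, assuming an x86 target and GCC/Clang inline-asm syntax; the wrapper below is not kernel code:

    /* cpuid has effects and inputs the compiler cannot see, so the asm must
     * not be reordered or eliminated even if an output looks unused. */
    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned leaf,
                      unsigned *a, unsigned *b, unsigned *c, unsigned *d)
    {
        asm volatile("cpuid"
                     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                     : "a" (leaf), "c" (0));
    }

    int main(void)
    {
        unsigned a, b, c, d;
        char vendor[13];

        cpuid(0, &a, &b, &c, &d);          /* leaf 0: max leaf and vendor string */
        memcpy(vendor + 0, &b, 4);
        memcpy(vendor + 4, &d, 4);
        memcpy(vendor + 8, &c, 4);
        vendor[12] = '\0';
        printf("max basic leaf %u, vendor %s\n", a, vendor);
        return 0;
    }
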
6223 diff -urNp linux-2.6.32.41/arch/x86/boot/header.S linux-2.6.32.41/arch/x86/boot/header.S
6224 --- linux-2.6.32.41/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6225 +++ linux-2.6.32.41/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6226 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6227 # single linked list of
6228 # struct setup_data
6229
6230 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6231 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6232
6233 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6234 #define VO_INIT_SIZE (VO__end - VO__text)
6235 diff -urNp linux-2.6.32.41/arch/x86/boot/memory.c linux-2.6.32.41/arch/x86/boot/memory.c
6236 --- linux-2.6.32.41/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6237 +++ linux-2.6.32.41/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6238 @@ -19,7 +19,7 @@
6239
6240 static int detect_memory_e820(void)
6241 {
6242 - int count = 0;
6243 + unsigned int count = 0;
6244 struct biosregs ireg, oreg;
6245 struct e820entry *desc = boot_params.e820_map;
6246 static struct e820entry buf; /* static so it is zeroed */
6247 diff -urNp linux-2.6.32.41/arch/x86/boot/video.c linux-2.6.32.41/arch/x86/boot/video.c
6248 --- linux-2.6.32.41/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6249 +++ linux-2.6.32.41/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6250 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6251 static unsigned int get_entry(void)
6252 {
6253 char entry_buf[4];
6254 - int i, len = 0;
6255 + unsigned int i, len = 0;
6256 int key;
6257 unsigned int v;
6258
6259 diff -urNp linux-2.6.32.41/arch/x86/boot/video-vesa.c linux-2.6.32.41/arch/x86/boot/video-vesa.c
6260 --- linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6261 +++ linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6262 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6263
6264 boot_params.screen_info.vesapm_seg = oreg.es;
6265 boot_params.screen_info.vesapm_off = oreg.di;
6266 + boot_params.screen_info.vesapm_size = oreg.cx;
6267 }
6268
6269 /*
6270 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_aout.c linux-2.6.32.41/arch/x86/ia32/ia32_aout.c
6271 --- linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6272 +++ linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6273 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6274 unsigned long dump_start, dump_size;
6275 struct user32 dump;
6276
6277 + memset(&dump, 0, sizeof(dump));
6278 +
6279 fs = get_fs();
6280 set_fs(KERNEL_DS);
6281 has_dumped = 1;
6282 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6283 dump_size = dump.u_ssize << PAGE_SHIFT;
6284 DUMP_WRITE(dump_start, dump_size);
6285 }
6286 - /*
6287 - * Finally dump the task struct. Not be used by gdb, but
6288 - * could be useful
6289 - */
6290 - set_fs(KERNEL_DS);
6291 - DUMP_WRITE(current, sizeof(*current));
6292 end_coredump:
6293 set_fs(fs);
6294 return has_dumped;
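
Zeroing the on-stack struct user32 before the a.out core-dump code fills it closes an information leak: fields and padding bytes the dump path never writes would otherwise carry stale kernel stack contents into the core file. A generic userspace sketch of the same rule, with made-up names:

    /* Never copy a partially-initialized structure (padding included) across
     * a trust boundary; clear it first.  All names are illustrative. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    struct report {
        uint32_t pid;
        uint16_t flags;
        /* two bytes of implicit padding live here */
        uint64_t start_addr;
    };

    static void export_report(int fd, uint32_t pid, uint64_t start)
    {
        struct report r;
        ssize_t n;

        memset(&r, 0, sizeof(r));   /* clears unset fields *and* padding bytes */
        r.pid = pid;
        r.start_addr = start;
        n = write(fd, &r, sizeof(r));
        (void)n;
    }

    int main(void)
    {
        int fd = open("/dev/null", O_WRONLY);

        if (fd >= 0) {
            export_report(fd, 1234, 0x400000);
            close(fd);
        }
        return 0;
    }
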
6295 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32entry.S linux-2.6.32.41/arch/x86/ia32/ia32entry.S
6296 --- linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6297 +++ linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6298 @@ -13,6 +13,7 @@
6299 #include <asm/thread_info.h>
6300 #include <asm/segment.h>
6301 #include <asm/irqflags.h>
6302 +#include <asm/pgtable.h>
6303 #include <linux/linkage.h>
6304
6305 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6306 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6307 ENDPROC(native_irq_enable_sysexit)
6308 #endif
6309
6310 + .macro pax_enter_kernel_user
6311 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6312 + call pax_enter_kernel_user
6313 +#endif
6314 + .endm
6315 +
6316 + .macro pax_exit_kernel_user
6317 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6318 + call pax_exit_kernel_user
6319 +#endif
6320 +#ifdef CONFIG_PAX_RANDKSTACK
6321 + pushq %rax
6322 + call pax_randomize_kstack
6323 + popq %rax
6324 +#endif
6325 + pax_erase_kstack
6326 + .endm
6327 +
6328 +.macro pax_erase_kstack
6329 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6330 + call pax_erase_kstack
6331 +#endif
6332 +.endm
6333 +
6334 /*
6335 * 32bit SYSENTER instruction entry.
6336 *
6337 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6338 CFI_REGISTER rsp,rbp
6339 SWAPGS_UNSAFE_STACK
6340 movq PER_CPU_VAR(kernel_stack), %rsp
6341 - addq $(KERNEL_STACK_OFFSET),%rsp
6342 + pax_enter_kernel_user
6343 /*
6344 * No need to follow this irqs on/off section: the syscall
6345 * disabled irqs, here we enable it straight after entry:
6346 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6347 pushfq
6348 CFI_ADJUST_CFA_OFFSET 8
6349 /*CFI_REL_OFFSET rflags,0*/
6350 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6351 + GET_THREAD_INFO(%r10)
6352 + movl TI_sysenter_return(%r10), %r10d
6353 CFI_REGISTER rip,r10
6354 pushq $__USER32_CS
6355 CFI_ADJUST_CFA_OFFSET 8
6356 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6357 SAVE_ARGS 0,0,1
6358 /* no need to do an access_ok check here because rbp has been
6359 32bit zero extended */
6360 +
6361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6362 + mov $PAX_USER_SHADOW_BASE,%r10
6363 + add %r10,%rbp
6364 +#endif
6365 +
6366 1: movl (%rbp),%ebp
6367 .section __ex_table,"a"
6368 .quad 1b,ia32_badarg
6369 @@ -172,6 +204,7 @@ sysenter_dispatch:
6370 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6371 jnz sysexit_audit
6372 sysexit_from_sys_call:
6373 + pax_exit_kernel_user
6374 andl $~TS_COMPAT,TI_status(%r10)
6375 /* clear IF, that popfq doesn't enable interrupts early */
6376 andl $~0x200,EFLAGS-R11(%rsp)
6377 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6378 movl %eax,%esi /* 2nd arg: syscall number */
6379 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6380 call audit_syscall_entry
6381 +
6382 + pax_erase_kstack
6383 +
6384 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6385 cmpq $(IA32_NR_syscalls-1),%rax
6386 ja ia32_badsys
6387 @@ -252,6 +288,9 @@ sysenter_tracesys:
6388 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6389 movq %rsp,%rdi /* &pt_regs -> arg1 */
6390 call syscall_trace_enter
6391 +
6392 + pax_erase_kstack
6393 +
6394 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6395 RESTORE_REST
6396 cmpq $(IA32_NR_syscalls-1),%rax
6397 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6398 ENTRY(ia32_cstar_target)
6399 CFI_STARTPROC32 simple
6400 CFI_SIGNAL_FRAME
6401 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6402 + CFI_DEF_CFA rsp,0
6403 CFI_REGISTER rip,rcx
6404 /*CFI_REGISTER rflags,r11*/
6405 SWAPGS_UNSAFE_STACK
6406 movl %esp,%r8d
6407 CFI_REGISTER rsp,r8
6408 movq PER_CPU_VAR(kernel_stack),%rsp
6409 +
6410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6411 + pax_enter_kernel_user
6412 +#endif
6413 +
6414 /*
6415 * No need to follow this irqs on/off section: the syscall
6416 * disabled irqs and here we enable it straight after entry:
6417 */
6418 ENABLE_INTERRUPTS(CLBR_NONE)
6419 - SAVE_ARGS 8,1,1
6420 + SAVE_ARGS 8*6,1,1
6421 movl %eax,%eax /* zero extension */
6422 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6423 movq %rcx,RIP-ARGOFFSET(%rsp)
6424 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6425 /* no need to do an access_ok check here because r8 has been
6426 32bit zero extended */
6427 /* hardware stack frame is complete now */
6428 +
6429 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6430 + mov $PAX_USER_SHADOW_BASE,%r10
6431 + add %r10,%r8
6432 +#endif
6433 +
6434 1: movl (%r8),%r9d
6435 .section __ex_table,"a"
6436 .quad 1b,ia32_badarg
6437 @@ -333,6 +383,7 @@ cstar_dispatch:
6438 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6439 jnz sysretl_audit
6440 sysretl_from_sys_call:
6441 + pax_exit_kernel_user
6442 andl $~TS_COMPAT,TI_status(%r10)
6443 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6444 movl RIP-ARGOFFSET(%rsp),%ecx
6445 @@ -370,6 +421,9 @@ cstar_tracesys:
6446 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6447 movq %rsp,%rdi /* &pt_regs -> arg1 */
6448 call syscall_trace_enter
6449 +
6450 + pax_erase_kstack
6451 +
6452 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6453 RESTORE_REST
6454 xchgl %ebp,%r9d
6455 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6456 CFI_REL_OFFSET rip,RIP-RIP
6457 PARAVIRT_ADJUST_EXCEPTION_FRAME
6458 SWAPGS
6459 + pax_enter_kernel_user
6460 /*
6461 * No need to follow this irqs on/off section: the syscall
6462 * disabled irqs and here we enable it straight after entry:
6463 @@ -448,6 +503,9 @@ ia32_tracesys:
6464 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6465 movq %rsp,%rdi /* &pt_regs -> arg1 */
6466 call syscall_trace_enter
6467 +
6468 + pax_erase_kstack
6469 +
6470 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6471 RESTORE_REST
6472 cmpq $(IA32_NR_syscalls-1),%rax
6473 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_signal.c linux-2.6.32.41/arch/x86/ia32/ia32_signal.c
6474 --- linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6475 +++ linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6476 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6477 sp -= frame_size;
6478 /* Align the stack pointer according to the i386 ABI,
6479 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6480 - sp = ((sp + 4) & -16ul) - 4;
6481 + sp = ((sp - 12) & -16ul) - 4;
6482 return (void __user *) sp;
6483 }
6484
6485 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6486 * These are actually not used anymore, but left because some
6487 * gdb versions depend on them as a marker.
6488 */
6489 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6490 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6491 } put_user_catch(err);
6492
6493 if (err)
6494 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6495 0xb8,
6496 __NR_ia32_rt_sigreturn,
6497 0x80cd,
6498 - 0,
6499 + 0
6500 };
6501
6502 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6503 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6504
6505 if (ka->sa.sa_flags & SA_RESTORER)
6506 restorer = ka->sa.sa_restorer;
6507 + else if (current->mm->context.vdso)
6508 + /* Return stub is in 32bit vsyscall page */
6509 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6510 else
6511 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6512 - rt_sigreturn);
6513 + restorer = &frame->retcode;
6514 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6515
6516 /*
6517 * Not actually used anymore, but left because some gdb
6518 * versions need it.
6519 */
6520 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6521 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6522 } put_user_catch(err);
6523
6524 if (err)
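
The get_sigframe change keeps the invariant stated in the surrounding comment: the i386 ABI expects ((sp + 4) & 15) == 0 on entry to the signal handler. The small standalone check below exercises the patched formula over all 16 stack residues and confirms it never rounds back up past the space just reserved for the frame; it is an illustration only, not kernel code:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long base = 0xffffd000UL;

        for (unsigned long off = 0; off < 16; off++) {
            unsigned long sp = base + off;                  /* sp after frame_size was subtracted */
            unsigned long aligned = ((sp - 12) & -16UL) - 4;

            assert((aligned + 4) % 16 == 0);   /* handler entry sees an ABI-aligned stack */
            assert(aligned <= sp);             /* only ever rounds downward */
        }
        printf("alignment invariant holds for all 16 residues\n");
        return 0;
    }
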
6525 diff -urNp linux-2.6.32.41/arch/x86/include/asm/alternative.h linux-2.6.32.41/arch/x86/include/asm/alternative.h
6526 --- linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6527 +++ linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6528 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6529 " .byte 662b-661b\n" /* sourcelen */ \
6530 " .byte 664f-663f\n" /* replacementlen */ \
6531 ".previous\n" \
6532 - ".section .altinstr_replacement, \"ax\"\n" \
6533 + ".section .altinstr_replacement, \"a\"\n" \
6534 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6535 ".previous"
6536
6537 diff -urNp linux-2.6.32.41/arch/x86/include/asm/apm.h linux-2.6.32.41/arch/x86/include/asm/apm.h
6538 --- linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6539 +++ linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6540 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6541 __asm__ __volatile__(APM_DO_ZERO_SEGS
6542 "pushl %%edi\n\t"
6543 "pushl %%ebp\n\t"
6544 - "lcall *%%cs:apm_bios_entry\n\t"
6545 + "lcall *%%ss:apm_bios_entry\n\t"
6546 "setc %%al\n\t"
6547 "popl %%ebp\n\t"
6548 "popl %%edi\n\t"
6549 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6550 __asm__ __volatile__(APM_DO_ZERO_SEGS
6551 "pushl %%edi\n\t"
6552 "pushl %%ebp\n\t"
6553 - "lcall *%%cs:apm_bios_entry\n\t"
6554 + "lcall *%%ss:apm_bios_entry\n\t"
6555 "setc %%bl\n\t"
6556 "popl %%ebp\n\t"
6557 "popl %%edi\n\t"
6558 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_32.h linux-2.6.32.41/arch/x86/include/asm/atomic_32.h
6559 --- linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6560 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6561 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6562 }
6563
6564 /**
6565 + * atomic_read_unchecked - read atomic variable
6566 + * @v: pointer of type atomic_unchecked_t
6567 + *
6568 + * Atomically reads the value of @v.
6569 + */
6570 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6571 +{
6572 + return v->counter;
6573 +}
6574 +
6575 +/**
6576 * atomic_set - set atomic variable
6577 * @v: pointer of type atomic_t
6578 * @i: required value
6579 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6580 }
6581
6582 /**
6583 + * atomic_set_unchecked - set atomic variable
6584 + * @v: pointer of type atomic_unchecked_t
6585 + * @i: required value
6586 + *
6587 + * Atomically sets the value of @v to @i.
6588 + */
6589 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6590 +{
6591 + v->counter = i;
6592 +}
6593 +
6594 +/**
6595 * atomic_add - add integer to atomic variable
6596 * @i: integer value to add
6597 * @v: pointer of type atomic_t
6598 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6599 */
6600 static inline void atomic_add(int i, atomic_t *v)
6601 {
6602 - asm volatile(LOCK_PREFIX "addl %1,%0"
6603 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6604 +
6605 +#ifdef CONFIG_PAX_REFCOUNT
6606 + "jno 0f\n"
6607 + LOCK_PREFIX "subl %1,%0\n"
6608 + "int $4\n0:\n"
6609 + _ASM_EXTABLE(0b, 0b)
6610 +#endif
6611 +
6612 + : "+m" (v->counter)
6613 + : "ir" (i));
6614 +}
6615 +
6616 +/**
6617 + * atomic_add_unchecked - add integer to atomic variable
6618 + * @i: integer value to add
6619 + * @v: pointer of type atomic_unchecked_t
6620 + *
6621 + * Atomically adds @i to @v.
6622 + */
6623 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6624 +{
6625 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6626 : "+m" (v->counter)
6627 : "ir" (i));
6628 }
6629 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6630 */
6631 static inline void atomic_sub(int i, atomic_t *v)
6632 {
6633 - asm volatile(LOCK_PREFIX "subl %1,%0"
6634 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6635 +
6636 +#ifdef CONFIG_PAX_REFCOUNT
6637 + "jno 0f\n"
6638 + LOCK_PREFIX "addl %1,%0\n"
6639 + "int $4\n0:\n"
6640 + _ASM_EXTABLE(0b, 0b)
6641 +#endif
6642 +
6643 + : "+m" (v->counter)
6644 + : "ir" (i));
6645 +}
6646 +
6647 +/**
6648 + * atomic_sub_unchecked - subtract integer from atomic variable
6649 + * @i: integer value to subtract
6650 + * @v: pointer of type atomic_unchecked_t
6651 + *
6652 + * Atomically subtracts @i from @v.
6653 + */
6654 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6655 +{
6656 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6657 : "+m" (v->counter)
6658 : "ir" (i));
6659 }
6660 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6661 {
6662 unsigned char c;
6663
6664 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6665 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6666 +
6667 +#ifdef CONFIG_PAX_REFCOUNT
6668 + "jno 0f\n"
6669 + LOCK_PREFIX "addl %2,%0\n"
6670 + "int $4\n0:\n"
6671 + _ASM_EXTABLE(0b, 0b)
6672 +#endif
6673 +
6674 + "sete %1\n"
6675 : "+m" (v->counter), "=qm" (c)
6676 : "ir" (i) : "memory");
6677 return c;
6678 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6679 */
6680 static inline void atomic_inc(atomic_t *v)
6681 {
6682 - asm volatile(LOCK_PREFIX "incl %0"
6683 + asm volatile(LOCK_PREFIX "incl %0\n"
6684 +
6685 +#ifdef CONFIG_PAX_REFCOUNT
6686 + "jno 0f\n"
6687 + LOCK_PREFIX "decl %0\n"
6688 + "int $4\n0:\n"
6689 + _ASM_EXTABLE(0b, 0b)
6690 +#endif
6691 +
6692 + : "+m" (v->counter));
6693 +}
6694 +
6695 +/**
6696 + * atomic_inc_unchecked - increment atomic variable
6697 + * @v: pointer of type atomic_unchecked_t
6698 + *
6699 + * Atomically increments @v by 1.
6700 + */
6701 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6702 +{
6703 + asm volatile(LOCK_PREFIX "incl %0\n"
6704 : "+m" (v->counter));
6705 }
6706
6707 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6708 */
6709 static inline void atomic_dec(atomic_t *v)
6710 {
6711 - asm volatile(LOCK_PREFIX "decl %0"
6712 + asm volatile(LOCK_PREFIX "decl %0\n"
6713 +
6714 +#ifdef CONFIG_PAX_REFCOUNT
6715 + "jno 0f\n"
6716 + LOCK_PREFIX "incl %0\n"
6717 + "int $4\n0:\n"
6718 + _ASM_EXTABLE(0b, 0b)
6719 +#endif
6720 +
6721 + : "+m" (v->counter));
6722 +}
6723 +
6724 +/**
6725 + * atomic_dec_unchecked - decrement atomic variable
6726 + * @v: pointer of type atomic_unchecked_t
6727 + *
6728 + * Atomically decrements @v by 1.
6729 + */
6730 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6731 +{
6732 + asm volatile(LOCK_PREFIX "decl %0\n"
6733 : "+m" (v->counter));
6734 }
6735
6736 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6737 {
6738 unsigned char c;
6739
6740 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6741 + asm volatile(LOCK_PREFIX "decl %0\n"
6742 +
6743 +#ifdef CONFIG_PAX_REFCOUNT
6744 + "jno 0f\n"
6745 + LOCK_PREFIX "incl %0\n"
6746 + "int $4\n0:\n"
6747 + _ASM_EXTABLE(0b, 0b)
6748 +#endif
6749 +
6750 + "sete %1\n"
6751 : "+m" (v->counter), "=qm" (c)
6752 : : "memory");
6753 return c != 0;
6754 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6755 {
6756 unsigned char c;
6757
6758 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6759 + asm volatile(LOCK_PREFIX "incl %0\n"
6760 +
6761 +#ifdef CONFIG_PAX_REFCOUNT
6762 + "jno 0f\n"
6763 + LOCK_PREFIX "decl %0\n"
6764 + "into\n0:\n"
6765 + _ASM_EXTABLE(0b, 0b)
6766 +#endif
6767 +
6768 + "sete %1\n"
6769 + : "+m" (v->counter), "=qm" (c)
6770 + : : "memory");
6771 + return c != 0;
6772 +}
6773 +
6774 +/**
6775 + * atomic_inc_and_test_unchecked - increment and test
6776 + * @v: pointer of type atomic_unchecked_t
6777 + *
6778 + * Atomically increments @v by 1
6779 + * and returns true if the result is zero, or false for all
6780 + * other cases.
6781 + */
6782 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6783 +{
6784 + unsigned char c;
6785 +
6786 + asm volatile(LOCK_PREFIX "incl %0\n"
6787 + "sete %1\n"
6788 : "+m" (v->counter), "=qm" (c)
6789 : : "memory");
6790 return c != 0;
6791 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6792 {
6793 unsigned char c;
6794
6795 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6796 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6797 +
6798 +#ifdef CONFIG_PAX_REFCOUNT
6799 + "jno 0f\n"
6800 + LOCK_PREFIX "subl %2,%0\n"
6801 + "int $4\n0:\n"
6802 + _ASM_EXTABLE(0b, 0b)
6803 +#endif
6804 +
6805 + "sets %1\n"
6806 : "+m" (v->counter), "=qm" (c)
6807 : "ir" (i) : "memory");
6808 return c;
6809 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6810 #endif
6811 /* Modern 486+ processor */
6812 __i = i;
6813 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6814 +
6815 +#ifdef CONFIG_PAX_REFCOUNT
6816 + "jno 0f\n"
6817 + "movl %0, %1\n"
6818 + "int $4\n0:\n"
6819 + _ASM_EXTABLE(0b, 0b)
6820 +#endif
6821 +
6822 + : "+r" (i), "+m" (v->counter)
6823 + : : "memory");
6824 + return i + __i;
6825 +
6826 +#ifdef CONFIG_M386
6827 +no_xadd: /* Legacy 386 processor */
6828 + local_irq_save(flags);
6829 + __i = atomic_read(v);
6830 + atomic_set(v, i + __i);
6831 + local_irq_restore(flags);
6832 + return i + __i;
6833 +#endif
6834 +}
6835 +
6836 +/**
6837 + * atomic_add_return_unchecked - add integer and return
6838 + * @v: pointer of type atomic_unchecked_t
6839 + * @i: integer value to add
6840 + *
6841 + * Atomically adds @i to @v and returns @i + @v
6842 + */
6843 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6844 +{
6845 + int __i;
6846 +#ifdef CONFIG_M386
6847 + unsigned long flags;
6848 + if (unlikely(boot_cpu_data.x86 <= 3))
6849 + goto no_xadd;
6850 +#endif
6851 + /* Modern 486+ processor */
6852 + __i = i;
6853 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6854 : "+r" (i), "+m" (v->counter)
6855 : : "memory");
6856 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6857 return cmpxchg(&v->counter, old, new);
6858 }
6859
6860 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6861 +{
6862 + return cmpxchg(&v->counter, old, new);
6863 +}
6864 +
6865 static inline int atomic_xchg(atomic_t *v, int new)
6866 {
6867 return xchg(&v->counter, new);
6868 }
6869
6870 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6871 +{
6872 + return xchg(&v->counter, new);
6873 +}
6874 +
6875 /**
6876 * atomic_add_unless - add unless the number is already a given value
6877 * @v: pointer of type atomic_t
6878 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
6879 */
6880 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6881 {
6882 - int c, old;
6883 + int c, old, new;
6884 c = atomic_read(v);
6885 for (;;) {
6886 - if (unlikely(c == (u)))
6887 + if (unlikely(c == u))
6888 break;
6889 - old = atomic_cmpxchg((v), c, c + (a));
6890 +
6891 + asm volatile("addl %2,%0\n"
6892 +
6893 +#ifdef CONFIG_PAX_REFCOUNT
6894 + "jno 0f\n"
6895 + "subl %2,%0\n"
6896 + "int $4\n0:\n"
6897 + _ASM_EXTABLE(0b, 0b)
6898 +#endif
6899 +
6900 + : "=r" (new)
6901 + : "0" (c), "ir" (a));
6902 +
6903 + old = atomic_cmpxchg(v, c, new);
6904 if (likely(old == c))
6905 break;
6906 c = old;
6907 }
6908 - return c != (u);
6909 + return c != u;
6910 }
6911
6912 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6913
6914 #define atomic_inc_return(v) (atomic_add_return(1, v))
6915 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6916 +{
6917 + return atomic_add_return_unchecked(1, v);
6918 +}
6919 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6920
6921 /* These are x86-specific, used by some header files */
6922 @@ -266,9 +495,18 @@ typedef struct {
6923 u64 __aligned(8) counter;
6924 } atomic64_t;
6925
6926 +#ifdef CONFIG_PAX_REFCOUNT
6927 +typedef struct {
6928 + u64 __aligned(8) counter;
6929 +} atomic64_unchecked_t;
6930 +#else
6931 +typedef atomic64_t atomic64_unchecked_t;
6932 +#endif
6933 +
6934 #define ATOMIC64_INIT(val) { (val) }
6935
6936 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
6937 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
6938
6939 /**
6940 * atomic64_xchg - xchg atomic64 variable
6941 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
6942 * the old value.
6943 */
6944 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
6945 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6946
6947 /**
6948 * atomic64_set - set atomic64 variable
6949 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
6950 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
6951
6952 /**
6953 + * atomic64_unchecked_set - set atomic64 variable
6954 + * @ptr: pointer to type atomic64_unchecked_t
6955 + * @new_val: value to assign
6956 + *
6957 + * Atomically sets the value of @ptr to @new_val.
6958 + */
6959 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6960 +
6961 +/**
6962 * atomic64_read - read atomic64 variable
6963 * @ptr: pointer to type atomic64_t
6964 *
6965 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
6966 return res;
6967 }
6968
6969 -extern u64 atomic64_read(atomic64_t *ptr);
6970 +/**
6971 + * atomic64_read_unchecked - read atomic64 variable
6972 + * @ptr: pointer to type atomic64_unchecked_t
6973 + *
6974 + * Atomically reads the value of @ptr and returns it.
6975 + */
6976 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
6977 +{
6978 + u64 res;
6979 +
6980 + /*
6981 + * Note, we inline this atomic64_unchecked_t primitive because
6982 + * it only clobbers EAX/EDX and leaves the others
6983 + * untouched. We also (somewhat subtly) rely on the
6984 + * fact that cmpxchg8b returns the current 64-bit value
6985 + * of the memory location we are touching:
6986 + */
6987 + asm volatile(
6988 + "mov %%ebx, %%eax\n\t"
6989 + "mov %%ecx, %%edx\n\t"
6990 + LOCK_PREFIX "cmpxchg8b %1\n"
6991 + : "=&A" (res)
6992 + : "m" (*ptr)
6993 + );
6994 +
6995 + return res;
6996 +}
6997
6998 /**
6999 * atomic64_add_return - add and return
7000 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7001 * Other variants with different arithmetic operators:
7002 */
7003 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7004 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7005 extern u64 atomic64_inc_return(atomic64_t *ptr);
7006 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7007 extern u64 atomic64_dec_return(atomic64_t *ptr);
7008 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7009
7010 /**
7011 * atomic64_add - add integer to atomic64 variable
7012 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7013 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7014
7015 /**
7016 + * atomic64_add_unchecked - add integer to atomic64 variable
7017 + * @delta: integer value to add
7018 + * @ptr: pointer to type atomic64_unchecked_t
7019 + *
7020 + * Atomically adds @delta to @ptr.
7021 + */
7022 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7023 +
7024 +/**
7025 * atomic64_sub - subtract the atomic64 variable
7026 * @delta: integer value to subtract
7027 * @ptr: pointer to type atomic64_t
7028 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7029 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7030
7031 /**
7032 + * atomic64_sub_unchecked - subtract the atomic64 variable
7033 + * @delta: integer value to subtract
7034 + * @ptr: pointer to type atomic64_unchecked_t
7035 + *
7036 + * Atomically subtracts @delta from @ptr.
7037 + */
7038 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7039 +
7040 +/**
7041 * atomic64_sub_and_test - subtract value from variable and test result
7042 * @delta: integer value to subtract
7043 * @ptr: pointer to type atomic64_t
7044 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7045 extern void atomic64_inc(atomic64_t *ptr);
7046
7047 /**
7048 + * atomic64_inc_unchecked - increment atomic64 variable
7049 + * @ptr: pointer to type atomic64_unchecked_t
7050 + *
7051 + * Atomically increments @ptr by 1.
7052 + */
7053 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7054 +
7055 +/**
7056 * atomic64_dec - decrement atomic64 variable
7057 * @ptr: pointer to type atomic64_t
7058 *
7059 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7060 extern void atomic64_dec(atomic64_t *ptr);
7061
7062 /**
7063 + * atomic64_dec_unchecked - decrement atomic64 variable
7064 + * @ptr: pointer to type atomic64_unchecked_t
7065 + *
7066 + * Atomically decrements @ptr by 1.
7067 + */
7068 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7069 +
7070 +/**
7071 * atomic64_dec_and_test - decrement and test
7072 * @ptr: pointer to type atomic64_t
7073 *
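
Throughout atomic_32.h the PAX_REFCOUNT variants append an overflow check to each locked operation: jno skips the fixup when no signed overflow occurred, otherwise the operation is undone and int $4 raises the overflow exception so the kernel can flag the reference-count bug. A rough userspace analogue of that detect-undo-report pattern, using C11 atomics and a compiler builtin instead of the trap; checked_inc is an illustrative name, not the PaX implementation:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int checked_inc(_Atomic int *v)
    {
        int old = atomic_fetch_add(v, 1);
        int unused;

        if (__builtin_add_overflow(old, 1, &unused)) {
            atomic_fetch_sub(v, 1);   /* roll back, like the subl/decl in the fixup path */
            fprintf(stderr, "refcount overflow detected\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        _Atomic int refcount = INT_MAX - 1;

        checked_inc(&refcount);   /* INT_MAX - 1 -> INT_MAX: fine */
        checked_inc(&refcount);   /* would wrap past INT_MAX: caught and undone */
        printf("final value: %d (still INT_MAX)\n", atomic_load(&refcount));
        return 0;
    }
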
7074 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_64.h linux-2.6.32.41/arch/x86/include/asm/atomic_64.h
7075 --- linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7076 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7077 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7078 }
7079
7080 /**
7081 + * atomic_read_unchecked - read atomic variable
7082 + * @v: pointer of type atomic_unchecked_t
7083 + *
7084 + * Atomically reads the value of @v.
7085 + */
7086 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7087 +{
7088 + return v->counter;
7089 +}
7090 +
7091 +/**
7092 * atomic_set - set atomic variable
7093 * @v: pointer of type atomic_t
7094 * @i: required value
7095 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7096 }
7097
7098 /**
7099 + * atomic_set_unchecked - set atomic variable
7100 + * @v: pointer of type atomic_unchecked_t
7101 + * @i: required value
7102 + *
7103 + * Atomically sets the value of @v to @i.
7104 + */
7105 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7106 +{
7107 + v->counter = i;
7108 +}
7109 +
7110 +/**
7111 * atomic_add - add integer to atomic variable
7112 * @i: integer value to add
7113 * @v: pointer of type atomic_t
7114 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7115 */
7116 static inline void atomic_add(int i, atomic_t *v)
7117 {
7118 - asm volatile(LOCK_PREFIX "addl %1,%0"
7119 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7120 +
7121 +#ifdef CONFIG_PAX_REFCOUNT
7122 + "jno 0f\n"
7123 + LOCK_PREFIX "subl %1,%0\n"
7124 + "int $4\n0:\n"
7125 + _ASM_EXTABLE(0b, 0b)
7126 +#endif
7127 +
7128 + : "=m" (v->counter)
7129 + : "ir" (i), "m" (v->counter));
7130 +}
7131 +
7132 +/**
7133 + * atomic_add_unchecked - add integer to atomic variable
7134 + * @i: integer value to add
7135 + * @v: pointer of type atomic_unchecked_t
7136 + *
7137 + * Atomically adds @i to @v.
7138 + */
7139 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7140 +{
7141 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7142 : "=m" (v->counter)
7143 : "ir" (i), "m" (v->counter));
7144 }
7145 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7146 */
7147 static inline void atomic_sub(int i, atomic_t *v)
7148 {
7149 - asm volatile(LOCK_PREFIX "subl %1,%0"
7150 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7151 +
7152 +#ifdef CONFIG_PAX_REFCOUNT
7153 + "jno 0f\n"
7154 + LOCK_PREFIX "addl %1,%0\n"
7155 + "int $4\n0:\n"
7156 + _ASM_EXTABLE(0b, 0b)
7157 +#endif
7158 +
7159 + : "=m" (v->counter)
7160 + : "ir" (i), "m" (v->counter));
7161 +}
7162 +
7163 +/**
7164 + * atomic_sub_unchecked - subtract the atomic variable
7165 + * @i: integer value to subtract
7166 + * @v: pointer of type atomic_unchecked_t
7167 + *
7168 + * Atomically subtracts @i from @v.
7169 + */
7170 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7171 +{
7172 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7173 : "=m" (v->counter)
7174 : "ir" (i), "m" (v->counter));
7175 }
7176 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7177 {
7178 unsigned char c;
7179
7180 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7181 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7182 +
7183 +#ifdef CONFIG_PAX_REFCOUNT
7184 + "jno 0f\n"
7185 + LOCK_PREFIX "addl %2,%0\n"
7186 + "int $4\n0:\n"
7187 + _ASM_EXTABLE(0b, 0b)
7188 +#endif
7189 +
7190 + "sete %1\n"
7191 : "=m" (v->counter), "=qm" (c)
7192 : "ir" (i), "m" (v->counter) : "memory");
7193 return c;
7194 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7195 */
7196 static inline void atomic_inc(atomic_t *v)
7197 {
7198 - asm volatile(LOCK_PREFIX "incl %0"
7199 + asm volatile(LOCK_PREFIX "incl %0\n"
7200 +
7201 +#ifdef CONFIG_PAX_REFCOUNT
7202 + "jno 0f\n"
7203 + LOCK_PREFIX "decl %0\n"
7204 + "int $4\n0:\n"
7205 + _ASM_EXTABLE(0b, 0b)
7206 +#endif
7207 +
7208 + : "=m" (v->counter)
7209 + : "m" (v->counter));
7210 +}
7211 +
7212 +/**
7213 + * atomic_inc_unchecked - increment atomic variable
7214 + * @v: pointer of type atomic_unchecked_t
7215 + *
7216 + * Atomically increments @v by 1.
7217 + */
7218 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7219 +{
7220 + asm volatile(LOCK_PREFIX "incl %0\n"
7221 : "=m" (v->counter)
7222 : "m" (v->counter));
7223 }
7224 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7225 */
7226 static inline void atomic_dec(atomic_t *v)
7227 {
7228 - asm volatile(LOCK_PREFIX "decl %0"
7229 + asm volatile(LOCK_PREFIX "decl %0\n"
7230 +
7231 +#ifdef CONFIG_PAX_REFCOUNT
7232 + "jno 0f\n"
7233 + LOCK_PREFIX "incl %0\n"
7234 + "int $4\n0:\n"
7235 + _ASM_EXTABLE(0b, 0b)
7236 +#endif
7237 +
7238 + : "=m" (v->counter)
7239 + : "m" (v->counter));
7240 +}
7241 +
7242 +/**
7243 + * atomic_dec_unchecked - decrement atomic variable
7244 + * @v: pointer of type atomic_unchecked_t
7245 + *
7246 + * Atomically decrements @v by 1.
7247 + */
7248 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7249 +{
7250 + asm volatile(LOCK_PREFIX "decl %0\n"
7251 : "=m" (v->counter)
7252 : "m" (v->counter));
7253 }
7254 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7255 {
7256 unsigned char c;
7257
7258 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7259 + asm volatile(LOCK_PREFIX "decl %0\n"
7260 +
7261 +#ifdef CONFIG_PAX_REFCOUNT
7262 + "jno 0f\n"
7263 + LOCK_PREFIX "incl %0\n"
7264 + "int $4\n0:\n"
7265 + _ASM_EXTABLE(0b, 0b)
7266 +#endif
7267 +
7268 + "sete %1\n"
7269 : "=m" (v->counter), "=qm" (c)
7270 : "m" (v->counter) : "memory");
7271 return c != 0;
7272 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7273 {
7274 unsigned char c;
7275
7276 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7277 + asm volatile(LOCK_PREFIX "incl %0\n"
7278 +
7279 +#ifdef CONFIG_PAX_REFCOUNT
7280 + "jno 0f\n"
7281 + LOCK_PREFIX "decl %0\n"
7282 + "int $4\n0:\n"
7283 + _ASM_EXTABLE(0b, 0b)
7284 +#endif
7285 +
7286 + "sete %1\n"
7287 + : "=m" (v->counter), "=qm" (c)
7288 + : "m" (v->counter) : "memory");
7289 + return c != 0;
7290 +}
7291 +
7292 +/**
7293 + * atomic_inc_and_test_unchecked - increment and test
7294 + * @v: pointer of type atomic_unchecked_t
7295 + *
7296 + * Atomically increments @v by 1
7297 + * and returns true if the result is zero, or false for all
7298 + * other cases.
7299 + */
7300 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7301 +{
7302 + unsigned char c;
7303 +
7304 + asm volatile(LOCK_PREFIX "incl %0\n"
7305 + "sete %1\n"
7306 : "=m" (v->counter), "=qm" (c)
7307 : "m" (v->counter) : "memory");
7308 return c != 0;
7309 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7310 {
7311 unsigned char c;
7312
7313 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7314 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7315 +
7316 +#ifdef CONFIG_PAX_REFCOUNT
7317 + "jno 0f\n"
7318 + LOCK_PREFIX "subl %2,%0\n"
7319 + "int $4\n0:\n"
7320 + _ASM_EXTABLE(0b, 0b)
7321 +#endif
7322 +
7323 + "sets %1\n"
7324 : "=m" (v->counter), "=qm" (c)
7325 : "ir" (i), "m" (v->counter) : "memory");
7326 return c;
7327 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7328 static inline int atomic_add_return(int i, atomic_t *v)
7329 {
7330 int __i = i;
7331 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7332 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7333 +
7334 +#ifdef CONFIG_PAX_REFCOUNT
7335 + "jno 0f\n"
7336 + "movl %0, %1\n"
7337 + "int $4\n0:\n"
7338 + _ASM_EXTABLE(0b, 0b)
7339 +#endif
7340 +
7341 + : "+r" (i), "+m" (v->counter)
7342 + : : "memory");
7343 + return i + __i;
7344 +}
7345 +
7346 +/**
7347 + * atomic_add_return_unchecked - add and return
7348 + * @i: integer value to add
7349 + * @v: pointer of type atomic_unchecked_t
7350 + *
7351 + * Atomically adds @i to @v and returns @i + @v
7352 + */
7353 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7354 +{
7355 + int __i = i;
7356 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7357 : "+r" (i), "+m" (v->counter)
7358 : : "memory");
7359 return i + __i;
7360 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7361 }
7362
7363 #define atomic_inc_return(v) (atomic_add_return(1, v))
7364 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7365 +{
7366 + return atomic_add_return_unchecked(1, v);
7367 +}
7368 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7369
7370 /* The 64-bit atomic type */
7371 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7372 }
7373
7374 /**
7375 + * atomic64_read_unchecked - read atomic64 variable
7376 + * @v: pointer of type atomic64_unchecked_t
7377 + *
7378 + * Atomically reads the value of @v.
7379 + * Doesn't imply a read memory barrier.
7380 + */
7381 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7382 +{
7383 + return v->counter;
7384 +}
7385 +
7386 +/**
7387 * atomic64_set - set atomic64 variable
7388 * @v: pointer to type atomic64_t
7389 * @i: required value
7390 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7391 }
7392
7393 /**
7394 + * atomic64_set_unchecked - set atomic64 variable
7395 + * @v: pointer to type atomic64_unchecked_t
7396 + * @i: required value
7397 + *
7398 + * Atomically sets the value of @v to @i.
7399 + */
7400 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7401 +{
7402 + v->counter = i;
7403 +}
7404 +
7405 +/**
7406 * atomic64_add - add integer to atomic64 variable
7407 * @i: integer value to add
7408 * @v: pointer to type atomic64_t
7409 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7410 */
7411 static inline void atomic64_add(long i, atomic64_t *v)
7412 {
7413 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7414 +
7415 +#ifdef CONFIG_PAX_REFCOUNT
7416 + "jno 0f\n"
7417 + LOCK_PREFIX "subq %1,%0\n"
7418 + "int $4\n0:\n"
7419 + _ASM_EXTABLE(0b, 0b)
7420 +#endif
7421 +
7422 + : "=m" (v->counter)
7423 + : "er" (i), "m" (v->counter));
7424 +}
7425 +
7426 +/**
7427 + * atomic64_add_unchecked - add integer to atomic64 variable
7428 + * @i: integer value to add
7429 + * @v: pointer to type atomic64_unchecked_t
7430 + *
7431 + * Atomically adds @i to @v.
7432 + */
7433 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7434 +{
7435 asm volatile(LOCK_PREFIX "addq %1,%0"
7436 : "=m" (v->counter)
7437 : "er" (i), "m" (v->counter));
7438 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7439 */
7440 static inline void atomic64_sub(long i, atomic64_t *v)
7441 {
7442 - asm volatile(LOCK_PREFIX "subq %1,%0"
7443 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7444 +
7445 +#ifdef CONFIG_PAX_REFCOUNT
7446 + "jno 0f\n"
7447 + LOCK_PREFIX "addq %1,%0\n"
7448 + "int $4\n0:\n"
7449 + _ASM_EXTABLE(0b, 0b)
7450 +#endif
7451 +
7452 : "=m" (v->counter)
7453 : "er" (i), "m" (v->counter));
7454 }
7455 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7456 {
7457 unsigned char c;
7458
7459 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7460 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7461 +
7462 +#ifdef CONFIG_PAX_REFCOUNT
7463 + "jno 0f\n"
7464 + LOCK_PREFIX "addq %2,%0\n"
7465 + "int $4\n0:\n"
7466 + _ASM_EXTABLE(0b, 0b)
7467 +#endif
7468 +
7469 + "sete %1\n"
7470 : "=m" (v->counter), "=qm" (c)
7471 : "er" (i), "m" (v->counter) : "memory");
7472 return c;
7473 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7474 */
7475 static inline void atomic64_inc(atomic64_t *v)
7476 {
7477 + asm volatile(LOCK_PREFIX "incq %0\n"
7478 +
7479 +#ifdef CONFIG_PAX_REFCOUNT
7480 + "jno 0f\n"
7481 + LOCK_PREFIX "decq %0\n"
7482 + "int $4\n0:\n"
7483 + _ASM_EXTABLE(0b, 0b)
7484 +#endif
7485 +
7486 + : "=m" (v->counter)
7487 + : "m" (v->counter));
7488 +}
7489 +
7490 +/**
7491 + * atomic64_inc_unchecked - increment atomic64 variable
7492 + * @v: pointer to type atomic64_unchecked_t
7493 + *
7494 + * Atomically increments @v by 1.
7495 + */
7496 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7497 +{
7498 asm volatile(LOCK_PREFIX "incq %0"
7499 : "=m" (v->counter)
7500 : "m" (v->counter));
7501 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7502 */
7503 static inline void atomic64_dec(atomic64_t *v)
7504 {
7505 - asm volatile(LOCK_PREFIX "decq %0"
7506 + asm volatile(LOCK_PREFIX "decq %0\n"
7507 +
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 + "jno 0f\n"
7510 + LOCK_PREFIX "incq %0\n"
7511 + "int $4\n0:\n"
7512 + _ASM_EXTABLE(0b, 0b)
7513 +#endif
7514 +
7515 + : "=m" (v->counter)
7516 + : "m" (v->counter));
7517 +}
7518 +
7519 +/**
7520 + * atomic64_dec_unchecked - decrement atomic64 variable
7521 + * @v: pointer to type atomic64_t
7522 + *
7523 + * Atomically decrements @v by 1.
7524 + */
7525 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "decq %0\n"
7528 : "=m" (v->counter)
7529 : "m" (v->counter));
7530 }
7531 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7536 + asm volatile(LOCK_PREFIX "decq %0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "incq %0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "=m" (v->counter), "=qm" (c)
7547 : "m" (v->counter) : "memory");
7548 return c != 0;
7549 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7550 {
7551 unsigned char c;
7552
7553 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7554 + asm volatile(LOCK_PREFIX "incq %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decq %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + "sete %1\n"
7564 : "=m" (v->counter), "=qm" (c)
7565 : "m" (v->counter) : "memory");
7566 return c != 0;
7567 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7568 {
7569 unsigned char c;
7570
7571 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7572 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7573 +
7574 +#ifdef CONFIG_PAX_REFCOUNT
7575 + "jno 0f\n"
7576 + LOCK_PREFIX "subq %2,%0\n"
7577 + "int $4\n0:\n"
7578 + _ASM_EXTABLE(0b, 0b)
7579 +#endif
7580 +
7581 + "sets %1\n"
7582 : "=m" (v->counter), "=qm" (c)
7583 : "er" (i), "m" (v->counter) : "memory");
7584 return c;
7585 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7586 static inline long atomic64_add_return(long i, atomic64_t *v)
7587 {
7588 long __i = i;
7589 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7590 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7591 +
7592 +#ifdef CONFIG_PAX_REFCOUNT
7593 + "jno 0f\n"
7594 + "movq %0, %1\n"
7595 + "int $4\n0:\n"
7596 + _ASM_EXTABLE(0b, 0b)
7597 +#endif
7598 +
7599 + : "+r" (i), "+m" (v->counter)
7600 + : : "memory");
7601 + return i + __i;
7602 +}
7603 +
7604 +/**
7605 + * atomic64_add_return_unchecked - add and return
7606 + * @i: integer value to add
7607 + * @v: pointer to type atomic64_unchecked_t
7608 + *
7609 + * Atomically adds @i to @v and returns @i + @v
7610 + */
7611 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7612 +{
7613 + long __i = i;
7614 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7615 : "+r" (i), "+m" (v->counter)
7616 : : "memory");
7617 return i + __i;
7618 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7619 }
7620
7621 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7622 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7623 +{
7624 + return atomic64_add_return_unchecked(1, v);
7625 +}
7626 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7627
7628 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7629 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7630 return cmpxchg(&v->counter, old, new);
7631 }
7632
7633 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7634 +{
7635 + return cmpxchg(&v->counter, old, new);
7636 +}
7637 +
7638 static inline long atomic64_xchg(atomic64_t *v, long new)
7639 {
7640 return xchg(&v->counter, new);
7641 }
7642
7643 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7644 +{
7645 + return xchg(&v->counter, new);
7646 +}
7647 +
7648 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7649 {
7650 return cmpxchg(&v->counter, old, new);
7651 }
7652
7653 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7654 +{
7655 + return cmpxchg(&v->counter, old, new);
7656 +}
7657 +
7658 static inline long atomic_xchg(atomic_t *v, int new)
7659 {
7660 return xchg(&v->counter, new);
7661 }
7662
7663 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7664 +{
7665 + return xchg(&v->counter, new);
7666 +}
7667 +
7668 /**
7669 * atomic_add_unless - add unless the number is a given value
7670 * @v: pointer of type atomic_t
7671 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7672 */
7673 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7674 {
7675 - int c, old;
7676 + int c, old, new;
7677 c = atomic_read(v);
7678 for (;;) {
7679 - if (unlikely(c == (u)))
7680 + if (unlikely(c == u))
7681 break;
7682 - old = atomic_cmpxchg((v), c, c + (a));
7683 +
7684 + asm volatile("addl %2,%0\n"
7685 +
7686 +#ifdef CONFIG_PAX_REFCOUNT
7687 + "jno 0f\n"
7688 + "subl %2,%0\n"
7689 + "int $4\n0:\n"
7690 + _ASM_EXTABLE(0b, 0b)
7691 +#endif
7692 +
7693 + : "=r" (new)
7694 + : "0" (c), "ir" (a));
7695 +
7696 + old = atomic_cmpxchg(v, c, new);
7697 if (likely(old == c))
7698 break;
7699 c = old;
7700 }
7701 - return c != (u);
7702 + return c != u;
7703 }
7704
7705 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7706 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7707 */
7708 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7709 {
7710 - long c, old;
7711 + long c, old, new;
7712 c = atomic64_read(v);
7713 for (;;) {
7714 - if (unlikely(c == (u)))
7715 + if (unlikely(c == u))
7716 break;
7717 - old = atomic64_cmpxchg((v), c, c + (a));
7718 +
7719 + asm volatile("addq %2,%0\n"
7720 +
7721 +#ifdef CONFIG_PAX_REFCOUNT
7722 + "jno 0f\n"
7723 + "subq %2,%0\n"
7724 + "int $4\n0:\n"
7725 + _ASM_EXTABLE(0b, 0b)
7726 +#endif
7727 +
7728 + : "=r" (new)
7729 + : "0" (c), "er" (a));
7730 +
7731 + old = atomic64_cmpxchg(v, c, new);
7732 if (likely(old == c))
7733 break;
7734 c = old;
7735 }
7736 - return c != (u);
7737 + return c != u;
7738 }
7739
7740 /**
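
The rewritten atomic_add_unless above keeps its cmpxchg loop but now computes the candidate value through the same overflow-checked add, so a wrapping addition is caught before it is ever stored. A sketch of that loop shape with C11 atomics; add_unless_checked and its error handling are illustrative, not the kernel interface:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool add_unless_checked(_Atomic long *v, long a, long u)
    {
        long c = atomic_load(v);

        for (;;) {
            long next;

            if (c == u)
                return false;                        /* value is already 'u': do nothing */
            if (__builtin_add_overflow(c, a, &next)) {
                fprintf(stderr, "overflow in add_unless\n");
                return false;                        /* caught before anything is stored */
            }
            if (atomic_compare_exchange_weak(v, &c, next))
                return true;                         /* c + a published atomically */
            /* the failed CAS reloaded c; retry */
        }
    }

    int main(void)
    {
        _Atomic long counter = 41;

        printf("added: %d, value: %ld\n",
               (int)add_unless_checked(&counter, 1, 0), (long)atomic_load(&counter));
        return 0;
    }
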
7741 diff -urNp linux-2.6.32.41/arch/x86/include/asm/bitops.h linux-2.6.32.41/arch/x86/include/asm/bitops.h
7742 --- linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7743 +++ linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7744 @@ -38,7 +38,7 @@
7745 * a mask operation on a byte.
7746 */
7747 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7748 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7749 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7750 #define CONST_MASK(nr) (1 << ((nr) & 7))
7751
7752 /**
7753 diff -urNp linux-2.6.32.41/arch/x86/include/asm/boot.h linux-2.6.32.41/arch/x86/include/asm/boot.h
7754 --- linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7755 +++ linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7756 @@ -11,10 +11,15 @@
7757 #include <asm/pgtable_types.h>
7758
7759 /* Physical address where kernel should be loaded. */
7760 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7761 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7762 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7763 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7764
7765 +#ifndef __ASSEMBLY__
7766 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7767 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7768 +#endif
7769 +
7770 /* Minimum kernel alignment, as a power of two */
7771 #ifdef CONFIG_X86_64
7772 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
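
The boot.h hunk above splits LOAD_PHYSICAL_ADDR in two: the raw round-up arithmetic moves to ____LOAD_PHYSICAL_ADDR, while C code now sees the address of a linker-provided __LOAD_PHYSICAL_ADDR symbol instead of a compile-time constant. The arithmetic itself is the usual power-of-two round-up; a small sketch with sample stand-ins for CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN:

#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + (align - 1)) & ~(align - 1);	/* align must be a power of two */
}

int main(void)
{
	unsigned long physical_start = 0x100000;	/* sample CONFIG_PHYSICAL_START */
	unsigned long physical_align = 0x1000000;	/* sample CONFIG_PHYSICAL_ALIGN, 16 MiB */

	printf("%#lx\n", align_up(physical_start, physical_align));	/* prints 0x1000000 */
	return 0;
}
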
7773 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cacheflush.h linux-2.6.32.41/arch/x86/include/asm/cacheflush.h
7774 --- linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7775 +++ linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7776 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7777 static inline unsigned long get_page_memtype(struct page *pg)
7778 {
7779 if (!PageUncached(pg) && !PageWC(pg))
7780 - return -1;
7781 + return ~0UL;
7782 else if (!PageUncached(pg) && PageWC(pg))
7783 return _PAGE_CACHE_WC;
7784 else if (PageUncached(pg) && !PageWC(pg))
7785 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7786 SetPageWC(pg);
7787 break;
7788 default:
7789 - case -1:
7790 + case ~0UL:
7791 ClearPageUncached(pg);
7792 ClearPageWC(pg);
7793 break;
7794 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cache.h linux-2.6.32.41/arch/x86/include/asm/cache.h
7795 --- linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7796 +++ linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7797 @@ -5,9 +5,10 @@
7798
7799 /* L1 cache line size */
7800 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7801 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7802 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7803
7804 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7805 +#define __read_only __attribute__((__section__(".data.read_only")))
7806
7807 #ifdef CONFIG_X86_VSMP
7808 /* vSMP Internode cacheline shift */
7809 diff -urNp linux-2.6.32.41/arch/x86/include/asm/checksum_32.h linux-2.6.32.41/arch/x86/include/asm/checksum_32.h
7810 --- linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7811 +++ linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7812 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7813 int len, __wsum sum,
7814 int *src_err_ptr, int *dst_err_ptr);
7815
7816 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7817 + int len, __wsum sum,
7818 + int *src_err_ptr, int *dst_err_ptr);
7819 +
7820 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7821 + int len, __wsum sum,
7822 + int *src_err_ptr, int *dst_err_ptr);
7823 +
7824 /*
7825 * Note: when you get a NULL pointer exception here this means someone
7826 * passed in an incorrect kernel address to one of these functions.
7827 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7828 int *err_ptr)
7829 {
7830 might_sleep();
7831 - return csum_partial_copy_generic((__force void *)src, dst,
7832 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7833 len, sum, err_ptr, NULL);
7834 }
7835
7836 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7837 {
7838 might_sleep();
7839 if (access_ok(VERIFY_WRITE, dst, len))
7840 - return csum_partial_copy_generic(src, (__force void *)dst,
7841 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7842 len, sum, NULL, err_ptr);
7843
7844 if (len)
7845 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc_defs.h linux-2.6.32.41/arch/x86/include/asm/desc_defs.h
7846 --- linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7847 +++ linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7848 @@ -31,6 +31,12 @@ struct desc_struct {
7849 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7850 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7851 };
7852 + struct {
7853 + u16 offset_low;
7854 + u16 seg;
7855 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7856 + unsigned offset_high: 16;
7857 + } gate;
7858 };
7859 } __attribute__((packed));
7860
7861 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc.h linux-2.6.32.41/arch/x86/include/asm/desc.h
7862 --- linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7863 +++ linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
7864 @@ -4,6 +4,7 @@
7865 #include <asm/desc_defs.h>
7866 #include <asm/ldt.h>
7867 #include <asm/mmu.h>
7868 +#include <asm/pgtable.h>
7869 #include <linux/smp.h>
7870
7871 static inline void fill_ldt(struct desc_struct *desc,
7872 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
7873 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
7874 desc->type = (info->read_exec_only ^ 1) << 1;
7875 desc->type |= info->contents << 2;
7876 + desc->type |= info->seg_not_present ^ 1;
7877 desc->s = 1;
7878 desc->dpl = 0x3;
7879 desc->p = info->seg_not_present ^ 1;
7880 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
7881 }
7882
7883 extern struct desc_ptr idt_descr;
7884 -extern gate_desc idt_table[];
7885 -
7886 -struct gdt_page {
7887 - struct desc_struct gdt[GDT_ENTRIES];
7888 -} __attribute__((aligned(PAGE_SIZE)));
7889 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7890 +extern gate_desc idt_table[256];
7891
7892 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7893 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7894 {
7895 - return per_cpu(gdt_page, cpu).gdt;
7896 + return cpu_gdt_table[cpu];
7897 }
7898
7899 #ifdef CONFIG_X86_64
7900 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
7901 unsigned long base, unsigned dpl, unsigned flags,
7902 unsigned short seg)
7903 {
7904 - gate->a = (seg << 16) | (base & 0xffff);
7905 - gate->b = (base & 0xffff0000) |
7906 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7907 + gate->gate.offset_low = base;
7908 + gate->gate.seg = seg;
7909 + gate->gate.reserved = 0;
7910 + gate->gate.type = type;
7911 + gate->gate.s = 0;
7912 + gate->gate.dpl = dpl;
7913 + gate->gate.p = 1;
7914 + gate->gate.offset_high = base >> 16;
7915 }
7916
7917 #endif
7918 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7919 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7920 const gate_desc *gate)
7921 {
7922 + pax_open_kernel();
7923 memcpy(&idt[entry], gate, sizeof(*gate));
7924 + pax_close_kernel();
7925 }
7926
7927 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7928 const void *desc)
7929 {
7930 + pax_open_kernel();
7931 memcpy(&ldt[entry], desc, 8);
7932 + pax_close_kernel();
7933 }
7934
7935 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7936 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7937 size = sizeof(struct desc_struct);
7938 break;
7939 }
7940 +
7941 + pax_open_kernel();
7942 memcpy(&gdt[entry], desc, size);
7943 + pax_close_kernel();
7944 }
7945
7946 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7947 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7948
7949 static inline void native_load_tr_desc(void)
7950 {
7951 + pax_open_kernel();
7952 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7953 + pax_close_kernel();
7954 }
7955
7956 static inline void native_load_gdt(const struct desc_ptr *dtr)
7957 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7958 unsigned int i;
7959 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7960
7961 + pax_open_kernel();
7962 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7963 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7964 + pax_close_kernel();
7965 }
7966
7967 #define _LDT_empty(info) \
7968 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7969 desc->limit = (limit >> 16) & 0xf;
7970 }
7971
7972 -static inline void _set_gate(int gate, unsigned type, void *addr,
7973 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7974 unsigned dpl, unsigned ist, unsigned seg)
7975 {
7976 gate_desc s;
7977 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7978 * Pentium F0 0F bugfix can have resulted in the mapped
7979 * IDT being write-protected.
7980 */
7981 -static inline void set_intr_gate(unsigned int n, void *addr)
7982 +static inline void set_intr_gate(unsigned int n, const void *addr)
7983 {
7984 BUG_ON((unsigned)n > 0xFF);
7985 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7986 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7987 /*
7988 * This routine sets up an interrupt gate at directory privilege level 3.
7989 */
7990 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7991 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7992 {
7993 BUG_ON((unsigned)n > 0xFF);
7994 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7995 }
7996
7997 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7998 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7999 {
8000 BUG_ON((unsigned)n > 0xFF);
8001 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8002 }
8003
8004 -static inline void set_trap_gate(unsigned int n, void *addr)
8005 +static inline void set_trap_gate(unsigned int n, const void *addr)
8006 {
8007 BUG_ON((unsigned)n > 0xFF);
8008 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8009 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8010 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8011 {
8012 BUG_ON((unsigned)n > 0xFF);
8013 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8014 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8015 }
8016
8017 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8018 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8019 {
8020 BUG_ON((unsigned)n > 0xFF);
8021 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8022 }
8023
8024 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8025 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8026 {
8027 BUG_ON((unsigned)n > 0xFF);
8028 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8029 }
8030
8031 +#ifdef CONFIG_X86_32
8032 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8033 +{
8034 + struct desc_struct d;
8035 +
8036 + if (likely(limit))
8037 + limit = (limit - 1UL) >> PAGE_SHIFT;
8038 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8039 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8040 +}
8041 +#endif
8042 +
8043 #endif /* _ASM_X86_DESC_H */
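
The desc_defs.h and desc.h changes above add an explicit "gate" bitfield view to desc_struct and rewrite pack_gate() to fill it field by field rather than assembling the raw a/b words. A user-space sketch mirroring that 8-byte layout, with arbitrary sample values (the 0x10 selector merely stands in for a kernel code segment):

#include <stdio.h>
#include <stdint.h>

struct gate_demo {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	struct gate_demo g = {0};
	unsigned long base = 0xdeadbeef;	/* arbitrary handler address */

	g.offset_low  = base;			/* low 16 bits, as pack_gate() does */
	g.seg         = 0x10;			/* sample code-segment selector */
	g.type        = 0xe;			/* interrupt gate */
	g.s           = 0;
	g.dpl         = 0;
	g.p           = 1;
	g.offset_high = base >> 16;

	printf("sizeof(struct gate_demo) = %zu bytes\n", sizeof(g));	/* expect 8 */
	return 0;
}
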
8044 diff -urNp linux-2.6.32.41/arch/x86/include/asm/device.h linux-2.6.32.41/arch/x86/include/asm/device.h
8045 --- linux-2.6.32.41/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8046 +++ linux-2.6.32.41/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8047 @@ -6,7 +6,7 @@ struct dev_archdata {
8048 void *acpi_handle;
8049 #endif
8050 #ifdef CONFIG_X86_64
8051 -struct dma_map_ops *dma_ops;
8052 + const struct dma_map_ops *dma_ops;
8053 #endif
8054 #ifdef CONFIG_DMAR
8055 void *iommu; /* hook for IOMMU specific extension */
8056 diff -urNp linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h
8057 --- linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8058 +++ linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8059 @@ -25,9 +25,9 @@ extern int iommu_merge;
8060 extern struct device x86_dma_fallback_dev;
8061 extern int panic_on_overflow;
8062
8063 -extern struct dma_map_ops *dma_ops;
8064 +extern const struct dma_map_ops *dma_ops;
8065
8066 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8067 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8068 {
8069 #ifdef CONFIG_X86_32
8070 return dma_ops;
8071 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8072 /* Make sure we keep the same behaviour */
8073 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8074 {
8075 - struct dma_map_ops *ops = get_dma_ops(dev);
8076 + const struct dma_map_ops *ops = get_dma_ops(dev);
8077 if (ops->mapping_error)
8078 return ops->mapping_error(dev, dma_addr);
8079
8080 @@ -122,7 +122,7 @@ static inline void *
8081 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8082 gfp_t gfp)
8083 {
8084 - struct dma_map_ops *ops = get_dma_ops(dev);
8085 + const struct dma_map_ops *ops = get_dma_ops(dev);
8086 void *memory;
8087
8088 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8089 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8090 static inline void dma_free_coherent(struct device *dev, size_t size,
8091 void *vaddr, dma_addr_t bus)
8092 {
8093 - struct dma_map_ops *ops = get_dma_ops(dev);
8094 + const struct dma_map_ops *ops = get_dma_ops(dev);
8095
8096 WARN_ON(irqs_disabled()); /* for portability */
8097
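
The device.h and dma-mapping.h hunks above only change types: every dma_map_ops pointer becomes a pointer-to-const, so the method tables reached through dev_archdata and get_dma_ops() can no longer be rewritten through those pointers. A tiny sketch of the effect, using hypothetical demo names (dma_map_ops_demo, nommu_demo_ops) rather than the real structures:

#include <stdio.h>

struct dma_map_ops_demo {
	int (*mapping_error)(int dev, unsigned long dma_addr);
};

static int no_error(int dev, unsigned long dma_addr)
{
	(void)dev;
	(void)dma_addr;
	return 0;
}

static const struct dma_map_ops_demo nommu_demo_ops = {
	.mapping_error = no_error,
};

int main(void)
{
	const struct dma_map_ops_demo *ops = &nommu_demo_ops;

	/* ops->mapping_error = NULL;   <-- rejected at compile time: *ops is const */
	printf("%d\n", ops->mapping_error(0, 0x1000));
	return 0;
}
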
8098 diff -urNp linux-2.6.32.41/arch/x86/include/asm/e820.h linux-2.6.32.41/arch/x86/include/asm/e820.h
8099 --- linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8100 +++ linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8101 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8102 #define ISA_END_ADDRESS 0x100000
8103 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8104
8105 -#define BIOS_BEGIN 0x000a0000
8106 +#define BIOS_BEGIN 0x000c0000
8107 #define BIOS_END 0x00100000
8108
8109 #ifdef __KERNEL__
8110 diff -urNp linux-2.6.32.41/arch/x86/include/asm/elf.h linux-2.6.32.41/arch/x86/include/asm/elf.h
8111 --- linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8112 +++ linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8113 @@ -257,7 +257,25 @@ extern int force_personality32;
8114 the loader. We need to make sure that it is out of the way of the program
8115 that it will "exec", and that there is sufficient room for the brk. */
8116
8117 +#ifdef CONFIG_PAX_SEGMEXEC
8118 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8119 +#else
8120 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8121 +#endif
8122 +
8123 +#ifdef CONFIG_PAX_ASLR
8124 +#ifdef CONFIG_X86_32
8125 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8126 +
8127 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8128 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8129 +#else
8130 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8131 +
8132 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8133 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8134 +#endif
8135 +#endif
8136
8137 /* This yields a mask that user programs can use to figure out what
8138 instruction set this CPU supports. This could be done in user space,
8139 @@ -311,8 +329,7 @@ do { \
8140 #define ARCH_DLINFO \
8141 do { \
8142 if (vdso_enabled) \
8143 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8144 - (unsigned long)current->mm->context.vdso); \
8145 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8146 } while (0)
8147
8148 #define AT_SYSINFO 32
8149 @@ -323,7 +340,7 @@ do { \
8150
8151 #endif /* !CONFIG_X86_32 */
8152
8153 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8154 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8155
8156 #define VDSO_ENTRY \
8157 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8158 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8159 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8160 #define compat_arch_setup_additional_pages syscall32_setup_pages
8161
8162 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8163 -#define arch_randomize_brk arch_randomize_brk
8164 -
8165 #endif /* _ASM_X86_ELF_H */
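
The elf.h hunk above wires x86 into PaX ASLR: ELF_ET_DYN_BASE gains a SEGMEXEC-aware variant, the arch_randomize_brk() hook is removed, and PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN select how much randomisation each layout gets. Assuming, as the names and their use elsewhere in PaX suggest, that these values are the number of randomised bits applied at page granularity, the resulting windows work out as in this sketch (TASK_SIZE_MAX_SHIFT is taken as 47, the usual x86-64 value, so 47 - 12 - 3 = 32 bits):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int bits[] = { 15, 16, 32 };	/* SEGMEXEC, plain 32-bit / ia32, 64-bit */

	for (unsigned int i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
		unsigned long long span = 1ULL << (bits[i] + PAGE_SHIFT);
		printf("%2u random bits -> base varies over %llu MiB\n",
		       bits[i], span >> 20);
	}
	return 0;
}
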
8166 diff -urNp linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h
8167 --- linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8168 +++ linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8169 @@ -15,6 +15,6 @@ enum reboot_type {
8170
8171 extern enum reboot_type reboot_type;
8172
8173 -extern void machine_emergency_restart(void);
8174 +extern void machine_emergency_restart(void) __noreturn;
8175
8176 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8177 diff -urNp linux-2.6.32.41/arch/x86/include/asm/futex.h linux-2.6.32.41/arch/x86/include/asm/futex.h
8178 --- linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8179 +++ linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8180 @@ -12,16 +12,18 @@
8181 #include <asm/system.h>
8182
8183 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8184 + typecheck(u32 *, uaddr); \
8185 asm volatile("1:\t" insn "\n" \
8186 "2:\t.section .fixup,\"ax\"\n" \
8187 "3:\tmov\t%3, %1\n" \
8188 "\tjmp\t2b\n" \
8189 "\t.previous\n" \
8190 _ASM_EXTABLE(1b, 3b) \
8191 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8192 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8193 : "i" (-EFAULT), "0" (oparg), "1" (0))
8194
8195 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8196 + typecheck(u32 *, uaddr); \
8197 asm volatile("1:\tmovl %2, %0\n" \
8198 "\tmovl\t%0, %3\n" \
8199 "\t" insn "\n" \
8200 @@ -34,10 +36,10 @@
8201 _ASM_EXTABLE(1b, 4b) \
8202 _ASM_EXTABLE(2b, 4b) \
8203 : "=&a" (oldval), "=&r" (ret), \
8204 - "+m" (*uaddr), "=&r" (tem) \
8205 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8206 : "r" (oparg), "i" (-EFAULT), "1" (0))
8207
8208 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8209 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8210 {
8211 int op = (encoded_op >> 28) & 7;
8212 int cmp = (encoded_op >> 24) & 15;
8213 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8214
8215 switch (op) {
8216 case FUTEX_OP_SET:
8217 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8218 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8219 break;
8220 case FUTEX_OP_ADD:
8221 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8222 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8223 uaddr, oparg);
8224 break;
8225 case FUTEX_OP_OR:
8226 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8227 return ret;
8228 }
8229
8230 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8231 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8232 int newval)
8233 {
8234
8235 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8236 return -ENOSYS;
8237 #endif
8238
8239 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8240 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8241 return -EFAULT;
8242
8243 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8244 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8245 "2:\t.section .fixup, \"ax\"\n"
8246 "3:\tmov %2, %0\n"
8247 "\tjmp 2b\n"
8248 "\t.previous\n"
8249 _ASM_EXTABLE(1b, 3b)
8250 - : "=a" (oldval), "+m" (*uaddr)
8251 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8252 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8253 : "memory"
8254 );
8255 diff -urNp linux-2.6.32.41/arch/x86/include/asm/hw_irq.h linux-2.6.32.41/arch/x86/include/asm/hw_irq.h
8256 --- linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8257 +++ linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8258 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8259 extern void enable_IO_APIC(void);
8260
8261 /* Statistics */
8262 -extern atomic_t irq_err_count;
8263 -extern atomic_t irq_mis_count;
8264 +extern atomic_unchecked_t irq_err_count;
8265 +extern atomic_unchecked_t irq_mis_count;
8266
8267 /* EISA */
8268 extern void eisa_set_level_irq(unsigned int irq);
8269 diff -urNp linux-2.6.32.41/arch/x86/include/asm/i387.h linux-2.6.32.41/arch/x86/include/asm/i387.h
8270 --- linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8271 +++ linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8272 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8273 {
8274 int err;
8275
8276 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8277 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8278 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8279 +#endif
8280 +
8281 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8282 "2:\n"
8283 ".section .fixup,\"ax\"\n"
8284 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8285 {
8286 int err;
8287
8288 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8289 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8290 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8291 +#endif
8292 +
8293 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8294 "2:\n"
8295 ".section .fixup,\"ax\"\n"
8296 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8297 }
8298
8299 /* We need a safe address that is cheap to find and that is already
8300 - in L1 during context switch. The best choices are unfortunately
8301 - different for UP and SMP */
8302 -#ifdef CONFIG_SMP
8303 -#define safe_address (__per_cpu_offset[0])
8304 -#else
8305 -#define safe_address (kstat_cpu(0).cpustat.user)
8306 -#endif
8307 + in L1 during context switch. */
8308 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8309
8310 /*
8311 * These must be called with preempt disabled
8312 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8313 struct thread_info *me = current_thread_info();
8314 preempt_disable();
8315 if (me->status & TS_USEDFPU)
8316 - __save_init_fpu(me->task);
8317 + __save_init_fpu(current);
8318 else
8319 clts();
8320 }
8321 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_32.h linux-2.6.32.41/arch/x86/include/asm/io_32.h
8322 --- linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8323 +++ linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8324 @@ -3,6 +3,7 @@
8325
8326 #include <linux/string.h>
8327 #include <linux/compiler.h>
8328 +#include <asm/processor.h>
8329
8330 /*
8331 * This file contains the definitions for the x86 IO instructions
8332 @@ -42,6 +43,17 @@
8333
8334 #ifdef __KERNEL__
8335
8336 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8337 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8338 +{
8339 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8340 +}
8341 +
8342 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8343 +{
8344 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8345 +}
8346 +
8347 #include <asm-generic/iomap.h>
8348
8349 #include <linux/vmalloc.h>
8350 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_64.h linux-2.6.32.41/arch/x86/include/asm/io_64.h
8351 --- linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8352 +++ linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8353 @@ -140,6 +140,17 @@ __OUTS(l)
8354
8355 #include <linux/vmalloc.h>
8356
8357 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8358 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8359 +{
8360 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8361 +}
8362 +
8363 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8364 +{
8365 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8366 +}
8367 +
8368 #include <asm-generic/iomap.h>
8369
8370 void __memcpy_fromio(void *, unsigned long, unsigned);
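
The identical helpers added to io_32.h and io_64.h above implement ARCH_HAS_VALID_PHYS_ADDR_RANGE: a /dev/mem-style access is only considered valid if the whole range, rounded up to pages, still ends below 1 << x86_phys_bits. The same check, lifted into a standalone sketch where a sample phys_bits value of 36 stands in for boot_cpu_data.x86_phys_bits:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

static int valid_phys_addr_range_demo(unsigned long long addr,
				      unsigned long long count,
				      unsigned int phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range_demo(0x1000ULL, 4096, 36));	/* 1: in range */
	printf("%d\n", valid_phys_addr_range_demo(0xfffffffffULL, 4096, 36));	/* 0: past 64 GiB */
	return 0;
}
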
8371 diff -urNp linux-2.6.32.41/arch/x86/include/asm/iommu.h linux-2.6.32.41/arch/x86/include/asm/iommu.h
8372 --- linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8373 +++ linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8374 @@ -3,7 +3,7 @@
8375
8376 extern void pci_iommu_shutdown(void);
8377 extern void no_iommu_init(void);
8378 -extern struct dma_map_ops nommu_dma_ops;
8379 +extern const struct dma_map_ops nommu_dma_ops;
8380 extern int force_iommu, no_iommu;
8381 extern int iommu_detected;
8382 extern int iommu_pass_through;
8383 diff -urNp linux-2.6.32.41/arch/x86/include/asm/irqflags.h linux-2.6.32.41/arch/x86/include/asm/irqflags.h
8384 --- linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8385 +++ linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8386 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8387 sti; \
8388 sysexit
8389
8390 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8391 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8392 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8393 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8394 +
8395 #else
8396 #define INTERRUPT_RETURN iret
8397 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8398 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kprobes.h linux-2.6.32.41/arch/x86/include/asm/kprobes.h
8399 --- linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8400 +++ linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8401 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8402 #define BREAKPOINT_INSTRUCTION 0xcc
8403 #define RELATIVEJUMP_INSTRUCTION 0xe9
8404 #define MAX_INSN_SIZE 16
8405 -#define MAX_STACK_SIZE 64
8406 -#define MIN_STACK_SIZE(ADDR) \
8407 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8408 - THREAD_SIZE - (unsigned long)(ADDR))) \
8409 - ? (MAX_STACK_SIZE) \
8410 - : (((unsigned long)current_thread_info()) + \
8411 - THREAD_SIZE - (unsigned long)(ADDR)))
8412 +#define MAX_STACK_SIZE 64UL
8413 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8414
8415 #define flush_insn_slot(p) do { } while (0)
8416
8417 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kvm_host.h linux-2.6.32.41/arch/x86/include/asm/kvm_host.h
8418 --- linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8419 +++ linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8420 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8421 const struct trace_print_flags *exit_reasons_str;
8422 };
8423
8424 -extern struct kvm_x86_ops *kvm_x86_ops;
8425 +extern const struct kvm_x86_ops *kvm_x86_ops;
8426
8427 int kvm_mmu_module_init(void);
8428 void kvm_mmu_module_exit(void);
8429 diff -urNp linux-2.6.32.41/arch/x86/include/asm/local.h linux-2.6.32.41/arch/x86/include/asm/local.h
8430 --- linux-2.6.32.41/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8431 +++ linux-2.6.32.41/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8432 @@ -18,26 +18,58 @@ typedef struct {
8433
8434 static inline void local_inc(local_t *l)
8435 {
8436 - asm volatile(_ASM_INC "%0"
8437 + asm volatile(_ASM_INC "%0\n"
8438 +
8439 +#ifdef CONFIG_PAX_REFCOUNT
8440 + "jno 0f\n"
8441 + _ASM_DEC "%0\n"
8442 + "int $4\n0:\n"
8443 + _ASM_EXTABLE(0b, 0b)
8444 +#endif
8445 +
8446 : "+m" (l->a.counter));
8447 }
8448
8449 static inline void local_dec(local_t *l)
8450 {
8451 - asm volatile(_ASM_DEC "%0"
8452 + asm volatile(_ASM_DEC "%0\n"
8453 +
8454 +#ifdef CONFIG_PAX_REFCOUNT
8455 + "jno 0f\n"
8456 + _ASM_INC "%0\n"
8457 + "int $4\n0:\n"
8458 + _ASM_EXTABLE(0b, 0b)
8459 +#endif
8460 +
8461 : "+m" (l->a.counter));
8462 }
8463
8464 static inline void local_add(long i, local_t *l)
8465 {
8466 - asm volatile(_ASM_ADD "%1,%0"
8467 + asm volatile(_ASM_ADD "%1,%0\n"
8468 +
8469 +#ifdef CONFIG_PAX_REFCOUNT
8470 + "jno 0f\n"
8471 + _ASM_SUB "%1,%0\n"
8472 + "int $4\n0:\n"
8473 + _ASM_EXTABLE(0b, 0b)
8474 +#endif
8475 +
8476 : "+m" (l->a.counter)
8477 : "ir" (i));
8478 }
8479
8480 static inline void local_sub(long i, local_t *l)
8481 {
8482 - asm volatile(_ASM_SUB "%1,%0"
8483 + asm volatile(_ASM_SUB "%1,%0\n"
8484 +
8485 +#ifdef CONFIG_PAX_REFCOUNT
8486 + "jno 0f\n"
8487 + _ASM_ADD "%1,%0\n"
8488 + "int $4\n0:\n"
8489 + _ASM_EXTABLE(0b, 0b)
8490 +#endif
8491 +
8492 : "+m" (l->a.counter)
8493 : "ir" (i));
8494 }
8495 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8496 {
8497 unsigned char c;
8498
8499 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8500 + asm volatile(_ASM_SUB "%2,%0\n"
8501 +
8502 +#ifdef CONFIG_PAX_REFCOUNT
8503 + "jno 0f\n"
8504 + _ASM_ADD "%2,%0\n"
8505 + "int $4\n0:\n"
8506 + _ASM_EXTABLE(0b, 0b)
8507 +#endif
8508 +
8509 + "sete %1\n"
8510 : "+m" (l->a.counter), "=qm" (c)
8511 : "ir" (i) : "memory");
8512 return c;
8513 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8514 {
8515 unsigned char c;
8516
8517 - asm volatile(_ASM_DEC "%0; sete %1"
8518 + asm volatile(_ASM_DEC "%0\n"
8519 +
8520 +#ifdef CONFIG_PAX_REFCOUNT
8521 + "jno 0f\n"
8522 + _ASM_INC "%0\n"
8523 + "int $4\n0:\n"
8524 + _ASM_EXTABLE(0b, 0b)
8525 +#endif
8526 +
8527 + "sete %1\n"
8528 : "+m" (l->a.counter), "=qm" (c)
8529 : : "memory");
8530 return c != 0;
8531 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8532 {
8533 unsigned char c;
8534
8535 - asm volatile(_ASM_INC "%0; sete %1"
8536 + asm volatile(_ASM_INC "%0\n"
8537 +
8538 +#ifdef CONFIG_PAX_REFCOUNT
8539 + "jno 0f\n"
8540 + _ASM_DEC "%0\n"
8541 + "int $4\n0:\n"
8542 + _ASM_EXTABLE(0b, 0b)
8543 +#endif
8544 +
8545 + "sete %1\n"
8546 : "+m" (l->a.counter), "=qm" (c)
8547 : : "memory");
8548 return c != 0;
8549 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8550 {
8551 unsigned char c;
8552
8553 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8554 + asm volatile(_ASM_ADD "%2,%0\n"
8555 +
8556 +#ifdef CONFIG_PAX_REFCOUNT
8557 + "jno 0f\n"
8558 + _ASM_SUB "%2,%0\n"
8559 + "int $4\n0:\n"
8560 + _ASM_EXTABLE(0b, 0b)
8561 +#endif
8562 +
8563 + "sets %1\n"
8564 : "+m" (l->a.counter), "=qm" (c)
8565 : "ir" (i) : "memory");
8566 return c;
8567 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8568 #endif
8569 /* Modern 486+ processor */
8570 __i = i;
8571 - asm volatile(_ASM_XADD "%0, %1;"
8572 + asm volatile(_ASM_XADD "%0, %1\n"
8573 +
8574 +#ifdef CONFIG_PAX_REFCOUNT
8575 + "jno 0f\n"
8576 + _ASM_MOV "%0,%1\n"
8577 + "int $4\n0:\n"
8578 + _ASM_EXTABLE(0b, 0b)
8579 +#endif
8580 +
8581 : "+r" (i), "+m" (l->a.counter)
8582 : : "memory");
8583 return i + __i;
8584 diff -urNp linux-2.6.32.41/arch/x86/include/asm/microcode.h linux-2.6.32.41/arch/x86/include/asm/microcode.h
8585 --- linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8586 +++ linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8587 @@ -12,13 +12,13 @@ struct device;
8588 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8589
8590 struct microcode_ops {
8591 - enum ucode_state (*request_microcode_user) (int cpu,
8592 + enum ucode_state (* const request_microcode_user) (int cpu,
8593 const void __user *buf, size_t size);
8594
8595 - enum ucode_state (*request_microcode_fw) (int cpu,
8596 + enum ucode_state (* const request_microcode_fw) (int cpu,
8597 struct device *device);
8598
8599 - void (*microcode_fini_cpu) (int cpu);
8600 + void (* const microcode_fini_cpu) (int cpu);
8601
8602 /*
8603 * The generic 'microcode_core' part guarantees that
8604 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8605 extern struct ucode_cpu_info ucode_cpu_info[];
8606
8607 #ifdef CONFIG_MICROCODE_INTEL
8608 -extern struct microcode_ops * __init init_intel_microcode(void);
8609 +extern const struct microcode_ops * __init init_intel_microcode(void);
8610 #else
8611 -static inline struct microcode_ops * __init init_intel_microcode(void)
8612 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8613 {
8614 return NULL;
8615 }
8616 #endif /* CONFIG_MICROCODE_INTEL */
8617
8618 #ifdef CONFIG_MICROCODE_AMD
8619 -extern struct microcode_ops * __init init_amd_microcode(void);
8620 +extern const struct microcode_ops * __init init_amd_microcode(void);
8621 #else
8622 -static inline struct microcode_ops * __init init_amd_microcode(void)
8623 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8624 {
8625 return NULL;
8626 }
8627 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mman.h linux-2.6.32.41/arch/x86/include/asm/mman.h
8628 --- linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8629 +++ linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8630 @@ -5,4 +5,14 @@
8631
8632 #include <asm-generic/mman.h>
8633
8634 +#ifdef __KERNEL__
8635 +#ifndef __ASSEMBLY__
8636 +#ifdef CONFIG_X86_32
8637 +#define arch_mmap_check i386_mmap_check
8638 +int i386_mmap_check(unsigned long addr, unsigned long len,
8639 + unsigned long flags);
8640 +#endif
8641 +#endif
8642 +#endif
8643 +
8644 #endif /* _ASM_X86_MMAN_H */
8645 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu_context.h linux-2.6.32.41/arch/x86/include/asm/mmu_context.h
8646 --- linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8647 +++ linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8648 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8649
8650 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8651 {
8652 +
8653 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8654 + unsigned int i;
8655 + pgd_t *pgd;
8656 +
8657 + pax_open_kernel();
8658 + pgd = get_cpu_pgd(smp_processor_id());
8659 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8660 + if (paravirt_enabled())
8661 + set_pgd(pgd+i, native_make_pgd(0));
8662 + else
8663 + pgd[i] = native_make_pgd(0);
8664 + pax_close_kernel();
8665 +#endif
8666 +
8667 #ifdef CONFIG_SMP
8668 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8669 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8670 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8671 struct task_struct *tsk)
8672 {
8673 unsigned cpu = smp_processor_id();
8674 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8675 + int tlbstate = TLBSTATE_OK;
8676 +#endif
8677
8678 if (likely(prev != next)) {
8679 #ifdef CONFIG_SMP
8680 +#ifdef CONFIG_X86_32
8681 + tlbstate = percpu_read(cpu_tlbstate.state);
8682 +#endif
8683 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8684 percpu_write(cpu_tlbstate.active_mm, next);
8685 #endif
8686 cpumask_set_cpu(cpu, mm_cpumask(next));
8687
8688 /* Re-load page tables */
8689 +#ifdef CONFIG_PAX_PER_CPU_PGD
8690 + pax_open_kernel();
8691 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8692 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8693 + pax_close_kernel();
8694 + load_cr3(get_cpu_pgd(cpu));
8695 +#else
8696 load_cr3(next->pgd);
8697 +#endif
8698
8699 /* stop flush ipis for the previous mm */
8700 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8701 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8702 */
8703 if (unlikely(prev->context.ldt != next->context.ldt))
8704 load_LDT_nolock(&next->context);
8705 - }
8706 +
8707 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8708 + if (!nx_enabled) {
8709 + smp_mb__before_clear_bit();
8710 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8711 + smp_mb__after_clear_bit();
8712 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8713 + }
8714 +#endif
8715 +
8716 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8717 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8718 + prev->context.user_cs_limit != next->context.user_cs_limit))
8719 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8720 #ifdef CONFIG_SMP
8721 + else if (unlikely(tlbstate != TLBSTATE_OK))
8722 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8723 +#endif
8724 +#endif
8725 +
8726 + }
8727 else {
8728 +
8729 +#ifdef CONFIG_PAX_PER_CPU_PGD
8730 + pax_open_kernel();
8731 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8732 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8733 + pax_close_kernel();
8734 + load_cr3(get_cpu_pgd(cpu));
8735 +#endif
8736 +
8737 +#ifdef CONFIG_SMP
8738 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8739 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8740
8741 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8742 * tlb flush IPI delivery. We must reload CR3
8743 * to make sure to use no freed page tables.
8744 */
8745 +
8746 +#ifndef CONFIG_PAX_PER_CPU_PGD
8747 load_cr3(next->pgd);
8748 +#endif
8749 +
8750 load_LDT_nolock(&next->context);
8751 +
8752 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8753 + if (!nx_enabled)
8754 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8755 +#endif
8756 +
8757 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8758 +#ifdef CONFIG_PAX_PAGEEXEC
8759 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8760 +#endif
8761 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8762 +#endif
8763 +
8764 }
8765 - }
8766 #endif
8767 + }
8768 }
8769
8770 #define activate_mm(prev, next) \
8771 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu.h linux-2.6.32.41/arch/x86/include/asm/mmu.h
8772 --- linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8773 +++ linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8774 @@ -9,10 +9,23 @@
8775 * we put the segment information here.
8776 */
8777 typedef struct {
8778 - void *ldt;
8779 + struct desc_struct *ldt;
8780 int size;
8781 struct mutex lock;
8782 - void *vdso;
8783 + unsigned long vdso;
8784 +
8785 +#ifdef CONFIG_X86_32
8786 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8787 + unsigned long user_cs_base;
8788 + unsigned long user_cs_limit;
8789 +
8790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8791 + cpumask_t cpu_user_cs_mask;
8792 +#endif
8793 +
8794 +#endif
8795 +#endif
8796 +
8797 } mm_context_t;
8798
8799 #ifdef CONFIG_SMP
8800 diff -urNp linux-2.6.32.41/arch/x86/include/asm/module.h linux-2.6.32.41/arch/x86/include/asm/module.h
8801 --- linux-2.6.32.41/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8802 +++ linux-2.6.32.41/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8803 @@ -5,6 +5,7 @@
8804
8805 #ifdef CONFIG_X86_64
8806 /* X86_64 does not define MODULE_PROC_FAMILY */
8807 +#define MODULE_PROC_FAMILY ""
8808 #elif defined CONFIG_M386
8809 #define MODULE_PROC_FAMILY "386 "
8810 #elif defined CONFIG_M486
8811 @@ -59,13 +60,36 @@
8812 #error unknown processor family
8813 #endif
8814
8815 -#ifdef CONFIG_X86_32
8816 -# ifdef CONFIG_4KSTACKS
8817 -# define MODULE_STACKSIZE "4KSTACKS "
8818 -# else
8819 -# define MODULE_STACKSIZE ""
8820 -# endif
8821 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8823 +#define MODULE_PAX_UDEREF "UDEREF "
8824 +#else
8825 +#define MODULE_PAX_UDEREF ""
8826 +#endif
8827 +
8828 +#ifdef CONFIG_PAX_KERNEXEC
8829 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
8830 +#else
8831 +#define MODULE_PAX_KERNEXEC ""
8832 +#endif
8833 +
8834 +#ifdef CONFIG_PAX_REFCOUNT
8835 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
8836 +#else
8837 +#define MODULE_PAX_REFCOUNT ""
8838 #endif
8839
8840 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8841 +#define MODULE_STACKSIZE "4KSTACKS "
8842 +#else
8843 +#define MODULE_STACKSIZE ""
8844 +#endif
8845 +
8846 +#ifdef CONFIG_GRKERNSEC
8847 +#define MODULE_GRSEC "GRSECURITY "
8848 +#else
8849 +#define MODULE_GRSEC ""
8850 +#endif
8851 +
8852 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8853 +
8854 #endif /* _ASM_X86_MODULE_H */
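
The MODULE_ARCH_VERMAGIC rework above is plain adjacent-string-literal concatenation: each hardening option contributes either its tag or an empty string, so modules built with a mismatched feature set fail the vermagic check at load time. A trivial sketch with the per-feature macros hard-coded to the "everything enabled" case (in the real header each one comes from its Kconfig test):

#include <stdio.h>

#define MODULE_PROC_FAMILY	""
#define MODULE_STACKSIZE	"4KSTACKS "
#define MODULE_GRSEC		"GRSECURITY "
#define MODULE_PAX_KERNEXEC	"KERNEXEC "
#define MODULE_PAX_UDEREF	"UDEREF "
#define MODULE_PAX_REFCOUNT	"REFCOUNT "

#define MODULE_ARCH_VERMAGIC	MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
				MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	puts("vermagic arch suffix: \"" MODULE_ARCH_VERMAGIC "\"");
	return 0;
}
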
8855 diff -urNp linux-2.6.32.41/arch/x86/include/asm/page_64_types.h linux-2.6.32.41/arch/x86/include/asm/page_64_types.h
8856 --- linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8857 +++ linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8858 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8859
8860 /* duplicated to the one in bootmem.h */
8861 extern unsigned long max_pfn;
8862 -extern unsigned long phys_base;
8863 +extern const unsigned long phys_base;
8864
8865 extern unsigned long __phys_addr(unsigned long);
8866 #define __phys_reloc_hide(x) (x)
8867 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt.h linux-2.6.32.41/arch/x86/include/asm/paravirt.h
8868 --- linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
8869 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
8870 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
8871 pv_mmu_ops.set_fixmap(idx, phys, flags);
8872 }
8873
8874 +#ifdef CONFIG_PAX_KERNEXEC
8875 +static inline unsigned long pax_open_kernel(void)
8876 +{
8877 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8878 +}
8879 +
8880 +static inline unsigned long pax_close_kernel(void)
8881 +{
8882 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8883 +}
8884 +#else
8885 +static inline unsigned long pax_open_kernel(void) { return 0; }
8886 +static inline unsigned long pax_close_kernel(void) { return 0; }
8887 +#endif
8888 +
8889 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8890
8891 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
8892 @@ -945,7 +960,7 @@ extern void default_banner(void);
8893
8894 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8895 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8896 -#define PARA_INDIRECT(addr) *%cs:addr
8897 +#define PARA_INDIRECT(addr) *%ss:addr
8898 #endif
8899
8900 #define INTERRUPT_RETURN \
8901 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
8902 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8903 CLBR_NONE, \
8904 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8905 +
8906 +#define GET_CR0_INTO_RDI \
8907 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8908 + mov %rax,%rdi
8909 +
8910 +#define SET_RDI_INTO_CR0 \
8911 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8912 +
8913 +#define GET_CR3_INTO_RDI \
8914 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8915 + mov %rax,%rdi
8916 +
8917 +#define SET_RDI_INTO_CR3 \
8918 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8919 +
8920 #endif /* CONFIG_X86_32 */
8921
8922 #endif /* __ASSEMBLY__ */
8923 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h
8924 --- linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
8925 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
8926 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
8927 an mfn. We can tell which is which from the index. */
8928 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8929 phys_addr_t phys, pgprot_t flags);
8930 +
8931 +#ifdef CONFIG_PAX_KERNEXEC
8932 + unsigned long (*pax_open_kernel)(void);
8933 + unsigned long (*pax_close_kernel)(void);
8934 +#endif
8935 +
8936 };
8937
8938 struct raw_spinlock;
8939 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pci_x86.h linux-2.6.32.41/arch/x86/include/asm/pci_x86.h
8940 --- linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
8941 +++ linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
8942 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
8943 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
8944
8945 struct pci_raw_ops {
8946 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8947 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8948 int reg, int len, u32 *val);
8949 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8950 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8951 int reg, int len, u32 val);
8952 };
8953
8954 -extern struct pci_raw_ops *raw_pci_ops;
8955 -extern struct pci_raw_ops *raw_pci_ext_ops;
8956 +extern const struct pci_raw_ops *raw_pci_ops;
8957 +extern const struct pci_raw_ops *raw_pci_ext_ops;
8958
8959 -extern struct pci_raw_ops pci_direct_conf1;
8960 +extern const struct pci_raw_ops pci_direct_conf1;
8961 extern bool port_cf9_safe;
8962
8963 /* arch_initcall level */
8964 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgalloc.h linux-2.6.32.41/arch/x86/include/asm/pgalloc.h
8965 --- linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
8966 +++ linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
8967 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8968 pmd_t *pmd, pte_t *pte)
8969 {
8970 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8971 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8972 +}
8973 +
8974 +static inline void pmd_populate_user(struct mm_struct *mm,
8975 + pmd_t *pmd, pte_t *pte)
8976 +{
8977 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8978 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8979 }
8980
8981 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h
8982 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
8983 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
8984 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8985
8986 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8987 {
8988 + pax_open_kernel();
8989 *pmdp = pmd;
8990 + pax_close_kernel();
8991 }
8992
8993 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8994 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h
8995 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
8996 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
8997 @@ -26,9 +26,6 @@
8998 struct mm_struct;
8999 struct vm_area_struct;
9000
9001 -extern pgd_t swapper_pg_dir[1024];
9002 -extern pgd_t trampoline_pg_dir[1024];
9003 -
9004 static inline void pgtable_cache_init(void) { }
9005 static inline void check_pgt_cache(void) { }
9006 void paging_init(void);
9007 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9008 # include <asm/pgtable-2level.h>
9009 #endif
9010
9011 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9012 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9013 +#ifdef CONFIG_X86_PAE
9014 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9015 +#endif
9016 +
9017 #if defined(CONFIG_HIGHPTE)
9018 #define __KM_PTE \
9019 (in_nmi() ? KM_NMI_PTE : \
9020 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9021 /* Clear a kernel PTE and flush it from the TLB */
9022 #define kpte_clear_flush(ptep, vaddr) \
9023 do { \
9024 + pax_open_kernel(); \
9025 pte_clear(&init_mm, (vaddr), (ptep)); \
9026 + pax_close_kernel(); \
9027 __flush_tlb_one((vaddr)); \
9028 } while (0)
9029
9030 @@ -85,6 +90,9 @@ do { \
9031
9032 #endif /* !__ASSEMBLY__ */
9033
9034 +#define HAVE_ARCH_UNMAPPED_AREA
9035 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9036 +
9037 /*
9038 * kern_addr_valid() is (1) for FLATMEM and (0) for
9039 * SPARSEMEM and DISCONTIGMEM
9040 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h
9041 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9042 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9043 @@ -8,7 +8,7 @@
9044 */
9045 #ifdef CONFIG_X86_PAE
9046 # include <asm/pgtable-3level_types.h>
9047 -# define PMD_SIZE (1UL << PMD_SHIFT)
9048 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9049 # define PMD_MASK (~(PMD_SIZE - 1))
9050 #else
9051 # include <asm/pgtable-2level_types.h>
9052 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9053 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9054 #endif
9055
9056 +#ifdef CONFIG_PAX_KERNEXEC
9057 +#ifndef __ASSEMBLY__
9058 +extern unsigned char MODULES_EXEC_VADDR[];
9059 +extern unsigned char MODULES_EXEC_END[];
9060 +#endif
9061 +#include <asm/boot.h>
9062 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9063 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9064 +#else
9065 +#define ktla_ktva(addr) (addr)
9066 +#define ktva_ktla(addr) (addr)
9067 +#endif
9068 +
9069 #define MODULES_VADDR VMALLOC_START
9070 #define MODULES_END VMALLOC_END
9071 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9072 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h
9073 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9074 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9075 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9076
9077 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9078 {
9079 + pax_open_kernel();
9080 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9081 + pax_close_kernel();
9082 }
9083
9084 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9085 {
9086 + pax_open_kernel();
9087 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9088 + pax_close_kernel();
9089 }
9090
9091 /*
9092 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h
9093 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9094 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9095 @@ -16,10 +16,13 @@
9096
9097 extern pud_t level3_kernel_pgt[512];
9098 extern pud_t level3_ident_pgt[512];
9099 +extern pud_t level3_vmalloc_pgt[512];
9100 +extern pud_t level3_vmemmap_pgt[512];
9101 +extern pud_t level2_vmemmap_pgt[512];
9102 extern pmd_t level2_kernel_pgt[512];
9103 extern pmd_t level2_fixmap_pgt[512];
9104 -extern pmd_t level2_ident_pgt[512];
9105 -extern pgd_t init_level4_pgt[];
9106 +extern pmd_t level2_ident_pgt[512*2];
9107 +extern pgd_t init_level4_pgt[512];
9108
9109 #define swapper_pg_dir init_level4_pgt
9110
9111 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9112
9113 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9114 {
9115 + pax_open_kernel();
9116 *pmdp = pmd;
9117 + pax_close_kernel();
9118 }
9119
9120 static inline void native_pmd_clear(pmd_t *pmd)
9121 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9122
9123 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9124 {
9125 + pax_open_kernel();
9126 *pgdp = pgd;
9127 + pax_close_kernel();
9128 }
9129
9130 static inline void native_pgd_clear(pgd_t *pgd)
9131 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h
9132 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9133 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9134 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9135 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9136 #define MODULES_END _AC(0xffffffffff000000, UL)
9137 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9138 +#define MODULES_EXEC_VADDR MODULES_VADDR
9139 +#define MODULES_EXEC_END MODULES_END
9140 +
9141 +#define ktla_ktva(addr) (addr)
9142 +#define ktva_ktla(addr) (addr)
9143
9144 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9145 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable.h linux-2.6.32.41/arch/x86/include/asm/pgtable.h
9146 --- linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9147 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9148 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9149
9150 #define arch_end_context_switch(prev) do {} while(0)
9151
9152 +#define pax_open_kernel() native_pax_open_kernel()
9153 +#define pax_close_kernel() native_pax_close_kernel()
9154 #endif /* CONFIG_PARAVIRT */
9155
9156 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9157 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9158 +
9159 +#ifdef CONFIG_PAX_KERNEXEC
9160 +static inline unsigned long native_pax_open_kernel(void)
9161 +{
9162 + unsigned long cr0;
9163 +
9164 + preempt_disable();
9165 + barrier();
9166 + cr0 = read_cr0() ^ X86_CR0_WP;
9167 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9168 + write_cr0(cr0);
9169 + return cr0 ^ X86_CR0_WP;
9170 +}
9171 +
9172 +static inline unsigned long native_pax_close_kernel(void)
9173 +{
9174 + unsigned long cr0;
9175 +
9176 + cr0 = read_cr0() ^ X86_CR0_WP;
9177 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9178 + write_cr0(cr0);
9179 + barrier();
9180 + preempt_enable_no_resched();
9181 + return cr0 ^ X86_CR0_WP;
9182 +}
9183 +#else
9184 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9185 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9186 +#endif
9187 +
9188 /*
9189 * The following only work if pte_present() is true.
9190 * Undefined behaviour if not..
9191 */
9192 +static inline int pte_user(pte_t pte)
9193 +{
9194 + return pte_val(pte) & _PAGE_USER;
9195 +}
9196 +
9197 static inline int pte_dirty(pte_t pte)
9198 {
9199 return pte_flags(pte) & _PAGE_DIRTY;
9200 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9201 return pte_clear_flags(pte, _PAGE_RW);
9202 }
9203
9204 +static inline pte_t pte_mkread(pte_t pte)
9205 +{
9206 + return __pte(pte_val(pte) | _PAGE_USER);
9207 +}
9208 +
9209 static inline pte_t pte_mkexec(pte_t pte)
9210 {
9211 - return pte_clear_flags(pte, _PAGE_NX);
9212 +#ifdef CONFIG_X86_PAE
9213 + if (__supported_pte_mask & _PAGE_NX)
9214 + return pte_clear_flags(pte, _PAGE_NX);
9215 + else
9216 +#endif
9217 + return pte_set_flags(pte, _PAGE_USER);
9218 +}
9219 +
9220 +static inline pte_t pte_exprotect(pte_t pte)
9221 +{
9222 +#ifdef CONFIG_X86_PAE
9223 + if (__supported_pte_mask & _PAGE_NX)
9224 + return pte_set_flags(pte, _PAGE_NX);
9225 + else
9226 +#endif
9227 + return pte_clear_flags(pte, _PAGE_USER);
9228 }
9229
9230 static inline pte_t pte_mkdirty(pte_t pte)
9231 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9232 #endif
9233
9234 #ifndef __ASSEMBLY__
9235 +
9236 +#ifdef CONFIG_PAX_PER_CPU_PGD
9237 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9238 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9239 +{
9240 + return cpu_pgd[cpu];
9241 +}
9242 +#endif
9243 +
9244 #include <linux/mm_types.h>
9245
9246 static inline int pte_none(pte_t pte)
9247 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9248
9249 static inline int pgd_bad(pgd_t pgd)
9250 {
9251 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9252 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9253 }
9254
9255 static inline int pgd_none(pgd_t pgd)
9256 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9257 * pgd_offset() returns a (pgd_t *)
9258 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9259 */
9260 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9261 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9262 +
9263 +#ifdef CONFIG_PAX_PER_CPU_PGD
9264 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9265 +#endif
9266 +
9267 /*
9268 * a shortcut which implies the use of the kernel's pgd, instead
9269 * of a process's
9270 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9271 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9272 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9273
9274 +#ifdef CONFIG_X86_32
9275 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9276 +#else
9277 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9278 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9279 +
9280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9281 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9282 +#else
9283 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9284 +#endif
9285 +
9286 +#endif
9287 +
9288 #ifndef __ASSEMBLY__
9289
9290 extern int direct_gbpages;
9291 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9292 * dst and src can be on the same page, but the range must not overlap,
9293 * and must not cross a page boundary.
9294 */
9295 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9296 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9297 {
9298 - memcpy(dst, src, count * sizeof(pgd_t));
9299 + pax_open_kernel();
9300 + while (count--)
9301 + *dst++ = *src++;
9302 + pax_close_kernel();
9303 }
9304
9305 +#ifdef CONFIG_PAX_PER_CPU_PGD
9306 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9307 +#endif
9308 +
9309 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9310 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9311 +#else
9312 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9313 +#endif
9314
9315 #include <asm-generic/pgtable.h>
9316 #endif /* __ASSEMBLY__ */
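/*
 * Illustrative sketch, not part of the patch above: pax_open_kernel()
 * and pax_close_kernel() bracket a single intentional write to data the
 * kernel otherwise keeps read-only, by toggling CR0.WP with preemption
 * disabled (clone_pgd_range() above is one caller).  The userspace
 * analogue below expresses the same open-write-close pattern with
 * mprotect(); it demonstrates only the discipline, not the kernel
 * mechanism.
 */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long psz = sysconf(_SC_PAGESIZE);
    char *ro_page = mmap(NULL, psz, PROT_READ,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(ro_page != MAP_FAILED);

    /* analogous to pax_open_kernel(): briefly allow writes */
    assert(mprotect(ro_page, psz, PROT_READ | PROT_WRITE) == 0);

    memcpy(ro_page, "updated", 8);   /* the one intentional write */

    /* analogous to pax_close_kernel(): restore read-only state */
    assert(mprotect(ro_page, psz, PROT_READ) == 0);
    return 0;
}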
9317 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h
9318 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9319 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9320 @@ -16,12 +16,11 @@
9321 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9322 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9323 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9324 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9325 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9326 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9327 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9328 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9329 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9330 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9331 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9332 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9333
9334 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9335 @@ -39,7 +38,6 @@
9336 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9337 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9338 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9339 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9340 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9341 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9342 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9343 @@ -55,8 +53,10 @@
9344
9345 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9346 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9347 -#else
9348 +#elif defined(CONFIG_KMEMCHECK)
9349 #define _PAGE_NX (_AT(pteval_t, 0))
9350 +#else
9351 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9352 #endif
9353
9354 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9355 @@ -93,6 +93,9 @@
9356 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9357 _PAGE_ACCESSED)
9358
9359 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9360 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9361 +
9362 #define __PAGE_KERNEL_EXEC \
9363 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9364 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9365 @@ -103,8 +106,8 @@
9366 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9367 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9368 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9369 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9370 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9371 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9372 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9373 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9374 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9375 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9376 @@ -163,8 +166,8 @@
9377 * bits are combined, this will alow user to access the high address mapped
9378 * VDSO in the presence of CONFIG_COMPAT_VDSO
9379 */
9380 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9381 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9382 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9383 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9384 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9385 #endif
9386
9387 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9388 {
9389 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9390 }
9391 +#endif
9392
9393 +#if PAGETABLE_LEVELS == 3
9394 +#include <asm-generic/pgtable-nopud.h>
9395 +#endif
9396 +
9397 +#if PAGETABLE_LEVELS == 2
9398 +#include <asm-generic/pgtable-nopmd.h>
9399 +#endif
9400 +
9401 +#ifndef __ASSEMBLY__
9402 #if PAGETABLE_LEVELS > 3
9403 typedef struct { pudval_t pud; } pud_t;
9404
9405 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9406 return pud.pud;
9407 }
9408 #else
9409 -#include <asm-generic/pgtable-nopud.h>
9410 -
9411 static inline pudval_t native_pud_val(pud_t pud)
9412 {
9413 return native_pgd_val(pud.pgd);
9414 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9415 return pmd.pmd;
9416 }
9417 #else
9418 -#include <asm-generic/pgtable-nopmd.h>
9419 -
9420 static inline pmdval_t native_pmd_val(pmd_t pmd)
9421 {
9422 return native_pgd_val(pmd.pud.pgd);
9423 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9424
9425 extern pteval_t __supported_pte_mask;
9426 extern void set_nx(void);
9427 +
9428 +#ifdef CONFIG_X86_32
9429 +#ifdef CONFIG_X86_PAE
9430 extern int nx_enabled;
9431 +#else
9432 +#define nx_enabled (0)
9433 +#endif
9434 +#else
9435 +#define nx_enabled (1)
9436 +#endif
9437
9438 #define pgprot_writecombine pgprot_writecombine
9439 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9440 diff -urNp linux-2.6.32.41/arch/x86/include/asm/processor.h linux-2.6.32.41/arch/x86/include/asm/processor.h
9441 --- linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9442 +++ linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9443 @@ -272,7 +272,7 @@ struct tss_struct {
9444
9445 } ____cacheline_aligned;
9446
9447 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9448 +extern struct tss_struct init_tss[NR_CPUS];
9449
9450 /*
9451 * Save the original ist values for checking stack pointers during debugging
9452 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9453 */
9454 #define TASK_SIZE PAGE_OFFSET
9455 #define TASK_SIZE_MAX TASK_SIZE
9456 +
9457 +#ifdef CONFIG_PAX_SEGMEXEC
9458 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9459 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9460 +#else
9461 #define STACK_TOP TASK_SIZE
9462 -#define STACK_TOP_MAX STACK_TOP
9463 +#endif
9464 +
9465 +#define STACK_TOP_MAX TASK_SIZE
9466
9467 #define INIT_THREAD { \
9468 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9469 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9470 .vm86_info = NULL, \
9471 .sysenter_cs = __KERNEL_CS, \
9472 .io_bitmap_ptr = NULL, \
9473 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9474 */
9475 #define INIT_TSS { \
9476 .x86_tss = { \
9477 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9478 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9479 .ss0 = __KERNEL_DS, \
9480 .ss1 = __KERNEL_CS, \
9481 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9482 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9483 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9484
9485 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9486 -#define KSTK_TOP(info) \
9487 -({ \
9488 - unsigned long *__ptr = (unsigned long *)(info); \
9489 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9490 -})
9491 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9492
9493 /*
9494 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9495 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9496 #define task_pt_regs(task) \
9497 ({ \
9498 struct pt_regs *__regs__; \
9499 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9500 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9501 __regs__ - 1; \
9502 })
9503
9504 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9505 /*
9506 * User space process size. 47bits minus one guard page.
9507 */
9508 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9509 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9510
9511 /* This decides where the kernel will search for a free chunk of vm
9512 * space during mmap's.
9513 */
9514 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9515 - 0xc0000000 : 0xFFFFe000)
9516 + 0xc0000000 : 0xFFFFf000)
9517
9518 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9519 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9520 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9521 #define STACK_TOP_MAX TASK_SIZE_MAX
9522
9523 #define INIT_THREAD { \
9524 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9525 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9526 }
9527
9528 #define INIT_TSS { \
9529 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9530 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9531 }
9532
9533 /*
9534 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9535 */
9536 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9537
9538 +#ifdef CONFIG_PAX_SEGMEXEC
9539 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9540 +#endif
9541 +
9542 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9543
9544 /* Get/set a process' ability to use the timestamp counter instruction */
9545 diff -urNp linux-2.6.32.41/arch/x86/include/asm/ptrace.h linux-2.6.32.41/arch/x86/include/asm/ptrace.h
9546 --- linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9547 +++ linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9548 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9549 }
9550
9551 /*
9552 - * user_mode_vm(regs) determines whether a register set came from user mode.
9553 + * user_mode(regs) determines whether a register set came from user mode.
9554 * This is true if V8086 mode was enabled OR if the register set was from
9555 * protected mode with RPL-3 CS value. This tricky test checks that with
9556 * one comparison. Many places in the kernel can bypass this full check
9557 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9558 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9559 + * be used.
9560 */
9561 -static inline int user_mode(struct pt_regs *regs)
9562 +static inline int user_mode_novm(struct pt_regs *regs)
9563 {
9564 #ifdef CONFIG_X86_32
9565 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9566 #else
9567 - return !!(regs->cs & 3);
9568 + return !!(regs->cs & SEGMENT_RPL_MASK);
9569 #endif
9570 }
9571
9572 -static inline int user_mode_vm(struct pt_regs *regs)
9573 +static inline int user_mode(struct pt_regs *regs)
9574 {
9575 #ifdef CONFIG_X86_32
9576 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9577 USER_RPL;
9578 #else
9579 - return user_mode(regs);
9580 + return user_mode_novm(regs);
9581 #endif
9582 }
9583
9584 diff -urNp linux-2.6.32.41/arch/x86/include/asm/reboot.h linux-2.6.32.41/arch/x86/include/asm/reboot.h
9585 --- linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9586 +++ linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9587 @@ -6,19 +6,19 @@
9588 struct pt_regs;
9589
9590 struct machine_ops {
9591 - void (*restart)(char *cmd);
9592 - void (*halt)(void);
9593 - void (*power_off)(void);
9594 + void (* __noreturn restart)(char *cmd);
9595 + void (* __noreturn halt)(void);
9596 + void (* __noreturn power_off)(void);
9597 void (*shutdown)(void);
9598 void (*crash_shutdown)(struct pt_regs *);
9599 - void (*emergency_restart)(void);
9600 + void (* __noreturn emergency_restart)(void);
9601 };
9602
9603 extern struct machine_ops machine_ops;
9604
9605 void native_machine_crash_shutdown(struct pt_regs *regs);
9606 void native_machine_shutdown(void);
9607 -void machine_real_restart(const unsigned char *code, int length);
9608 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9609
9610 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9611 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9612 diff -urNp linux-2.6.32.41/arch/x86/include/asm/rwsem.h linux-2.6.32.41/arch/x86/include/asm/rwsem.h
9613 --- linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9614 +++ linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9615 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9616 {
9617 asm volatile("# beginning down_read\n\t"
9618 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9619 +
9620 +#ifdef CONFIG_PAX_REFCOUNT
9621 + "jno 0f\n"
9622 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9623 + "int $4\n0:\n"
9624 + _ASM_EXTABLE(0b, 0b)
9625 +#endif
9626 +
9627 /* adds 0x00000001, returns the old value */
9628 " jns 1f\n"
9629 " call call_rwsem_down_read_failed\n"
9630 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9631 "1:\n\t"
9632 " mov %1,%2\n\t"
9633 " add %3,%2\n\t"
9634 +
9635 +#ifdef CONFIG_PAX_REFCOUNT
9636 + "jno 0f\n"
9637 + "sub %3,%2\n"
9638 + "int $4\n0:\n"
9639 + _ASM_EXTABLE(0b, 0b)
9640 +#endif
9641 +
9642 " jle 2f\n\t"
9643 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9644 " jnz 1b\n\t"
9645 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9646 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9647 asm volatile("# beginning down_write\n\t"
9648 LOCK_PREFIX " xadd %1,(%2)\n\t"
9649 +
9650 +#ifdef CONFIG_PAX_REFCOUNT
9651 + "jno 0f\n"
9652 + "mov %1,(%2)\n"
9653 + "int $4\n0:\n"
9654 + _ASM_EXTABLE(0b, 0b)
9655 +#endif
9656 +
9657 /* subtract 0x0000ffff, returns the old value */
9658 " test %1,%1\n\t"
9659 /* was the count 0 before? */
9660 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9661 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9662 asm volatile("# beginning __up_read\n\t"
9663 LOCK_PREFIX " xadd %1,(%2)\n\t"
9664 +
9665 +#ifdef CONFIG_PAX_REFCOUNT
9666 + "jno 0f\n"
9667 + "mov %1,(%2)\n"
9668 + "int $4\n0:\n"
9669 + _ASM_EXTABLE(0b, 0b)
9670 +#endif
9671 +
9672 /* subtracts 1, returns the old value */
9673 " jns 1f\n\t"
9674 " call call_rwsem_wake\n"
9675 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9676 rwsem_count_t tmp;
9677 asm volatile("# beginning __up_write\n\t"
9678 LOCK_PREFIX " xadd %1,(%2)\n\t"
9679 +
9680 +#ifdef CONFIG_PAX_REFCOUNT
9681 + "jno 0f\n"
9682 + "mov %1,(%2)\n"
9683 + "int $4\n0:\n"
9684 + _ASM_EXTABLE(0b, 0b)
9685 +#endif
9686 +
9687 /* tries to transition
9688 0xffff0001 -> 0x00000000 */
9689 " jz 1f\n"
9690 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9691 {
9692 asm volatile("# beginning __downgrade_write\n\t"
9693 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9694 +
9695 +#ifdef CONFIG_PAX_REFCOUNT
9696 + "jno 0f\n"
9697 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9698 + "int $4\n0:\n"
9699 + _ASM_EXTABLE(0b, 0b)
9700 +#endif
9701 +
9702 /*
9703 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9704 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9705 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9706 static inline void rwsem_atomic_add(rwsem_count_t delta,
9707 struct rw_semaphore *sem)
9708 {
9709 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9710 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9711 +
9712 +#ifdef CONFIG_PAX_REFCOUNT
9713 + "jno 0f\n"
9714 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9715 + "int $4\n0:\n"
9716 + _ASM_EXTABLE(0b, 0b)
9717 +#endif
9718 +
9719 : "+m" (sem->count)
9720 : "er" (delta));
9721 }
9722 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9723 {
9724 rwsem_count_t tmp = delta;
9725
9726 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9727 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9728 +
9729 +#ifdef CONFIG_PAX_REFCOUNT
9730 + "jno 0f\n"
9731 + "mov %0,%1\n"
9732 + "int $4\n0:\n"
9733 + _ASM_EXTABLE(0b, 0b)
9734 +#endif
9735 +
9736 : "+r" (tmp), "+m" (sem->count)
9737 : : "memory");
9738
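/*
 * Illustrative sketch, not part of the patch above: every
 * CONFIG_PAX_REFCOUNT block added to these rwsem helpers follows the
 * same shape: do the locked arithmetic, "jno" past the fixup when the
 * overflow flag is clear, otherwise undo the operation and raise the
 * overflow exception with "int $4".  The plain C below mirrors that
 * detect/undo/report flow using __builtin_add_overflow() (GCC/Clang);
 * it is a conceptual analogue and has none of the LOCK-prefixed
 * atomicity of the real inline assembly.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_add(int *count, int delta)
{
    int sum;

    if (__builtin_add_overflow(*count, delta, &sum)) {
        /* overflow: leave *count untouched and report, the moral
         * equivalent of undoing the add and executing "int $4" */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    *count = sum;
    return sum;
}

int main(void)
{
    int count = INT_MAX - 1;

    checked_add(&count, 1);   /* fine: reaches INT_MAX  */
    checked_add(&count, 1);   /* overflows: aborts here */
    return 0;
}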
9739 diff -urNp linux-2.6.32.41/arch/x86/include/asm/segment.h linux-2.6.32.41/arch/x86/include/asm/segment.h
9740 --- linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9741 +++ linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9742 @@ -62,8 +62,8 @@
9743 * 26 - ESPFIX small SS
9744 * 27 - per-cpu [ offset to per-cpu data area ]
9745 * 28 - stack_canary-20 [ for stack protector ]
9746 - * 29 - unused
9747 - * 30 - unused
9748 + * 29 - PCI BIOS CS
9749 + * 30 - PCI BIOS DS
9750 * 31 - TSS for double fault handler
9751 */
9752 #define GDT_ENTRY_TLS_MIN 6
9753 @@ -77,6 +77,8 @@
9754
9755 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9756
9757 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9758 +
9759 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9760
9761 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9762 @@ -88,7 +90,7 @@
9763 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9764 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9765
9766 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9767 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9768 #ifdef CONFIG_SMP
9769 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9770 #else
9771 @@ -102,6 +104,12 @@
9772 #define __KERNEL_STACK_CANARY 0
9773 #endif
9774
9775 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9776 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9777 +
9778 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9779 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9780 +
9781 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9782
9783 /*
9784 @@ -139,7 +147,7 @@
9785 */
9786
9787 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9788 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9789 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9790
9791
9792 #else
9793 @@ -163,6 +171,8 @@
9794 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9795 #define __USER32_DS __USER_DS
9796
9797 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9798 +
9799 #define GDT_ENTRY_TSS 8 /* needs two entries */
9800 #define GDT_ENTRY_LDT 10 /* needs two entries */
9801 #define GDT_ENTRY_TLS_MIN 12
9802 @@ -183,6 +193,7 @@
9803 #endif
9804
9805 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9806 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9807 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9808 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9809 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9810 diff -urNp linux-2.6.32.41/arch/x86/include/asm/smp.h linux-2.6.32.41/arch/x86/include/asm/smp.h
9811 --- linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9812 +++ linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9813 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
9814 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9815 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9816 DECLARE_PER_CPU(u16, cpu_llc_id);
9817 -DECLARE_PER_CPU(int, cpu_number);
9818 +DECLARE_PER_CPU(unsigned int, cpu_number);
9819
9820 static inline struct cpumask *cpu_sibling_mask(int cpu)
9821 {
9822 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9823 extern int safe_smp_processor_id(void);
9824
9825 #elif defined(CONFIG_X86_64_SMP)
9826 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9827 -
9828 -#define stack_smp_processor_id() \
9829 -({ \
9830 - struct thread_info *ti; \
9831 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9832 - ti->cpu; \
9833 -})
9834 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9835 +#define stack_smp_processor_id() raw_smp_processor_id()
9836 #define safe_smp_processor_id() smp_processor_id()
9837
9838 #endif
9839 diff -urNp linux-2.6.32.41/arch/x86/include/asm/spinlock.h linux-2.6.32.41/arch/x86/include/asm/spinlock.h
9840 --- linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9841 +++ linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9842 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9843 static inline void __raw_read_lock(raw_rwlock_t *rw)
9844 {
9845 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9846 +
9847 +#ifdef CONFIG_PAX_REFCOUNT
9848 + "jno 0f\n"
9849 + LOCK_PREFIX " addl $1,(%0)\n"
9850 + "int $4\n0:\n"
9851 + _ASM_EXTABLE(0b, 0b)
9852 +#endif
9853 +
9854 "jns 1f\n"
9855 "call __read_lock_failed\n\t"
9856 "1:\n"
9857 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9858 static inline void __raw_write_lock(raw_rwlock_t *rw)
9859 {
9860 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9861 +
9862 +#ifdef CONFIG_PAX_REFCOUNT
9863 + "jno 0f\n"
9864 + LOCK_PREFIX " addl %1,(%0)\n"
9865 + "int $4\n0:\n"
9866 + _ASM_EXTABLE(0b, 0b)
9867 +#endif
9868 +
9869 "jz 1f\n"
9870 "call __write_lock_failed\n\t"
9871 "1:\n"
9872 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
9873
9874 static inline void __raw_read_unlock(raw_rwlock_t *rw)
9875 {
9876 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9877 + asm volatile(LOCK_PREFIX "incl %0\n"
9878 +
9879 +#ifdef CONFIG_PAX_REFCOUNT
9880 + "jno 0f\n"
9881 + LOCK_PREFIX "decl %0\n"
9882 + "int $4\n0:\n"
9883 + _ASM_EXTABLE(0b, 0b)
9884 +#endif
9885 +
9886 + :"+m" (rw->lock) : : "memory");
9887 }
9888
9889 static inline void __raw_write_unlock(raw_rwlock_t *rw)
9890 {
9891 - asm volatile(LOCK_PREFIX "addl %1, %0"
9892 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9893 +
9894 +#ifdef CONFIG_PAX_REFCOUNT
9895 + "jno 0f\n"
9896 + LOCK_PREFIX "subl %1, %0\n"
9897 + "int $4\n0:\n"
9898 + _ASM_EXTABLE(0b, 0b)
9899 +#endif
9900 +
9901 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9902 }
9903
9904 diff -urNp linux-2.6.32.41/arch/x86/include/asm/stackprotector.h linux-2.6.32.41/arch/x86/include/asm/stackprotector.h
9905 --- linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
9906 +++ linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
9907 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9908
9909 static inline void load_stack_canary_segment(void)
9910 {
9911 -#ifdef CONFIG_X86_32
9912 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9913 asm volatile ("mov %0, %%gs" : : "r" (0));
9914 #endif
9915 }
9916 diff -urNp linux-2.6.32.41/arch/x86/include/asm/system.h linux-2.6.32.41/arch/x86/include/asm/system.h
9917 --- linux-2.6.32.41/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
9918 +++ linux-2.6.32.41/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
9919 @@ -132,7 +132,7 @@ do { \
9920 "thread_return:\n\t" \
9921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9922 __switch_canary \
9923 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
9924 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9925 "movq %%rax,%%rdi\n\t" \
9926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9927 "jnz ret_from_fork\n\t" \
9928 @@ -143,7 +143,7 @@ do { \
9929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9931 [_tif_fork] "i" (_TIF_FORK), \
9932 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9933 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
9934 [current_task] "m" (per_cpu_var(current_task)) \
9935 __switch_canary_iparam \
9936 : "memory", "cc" __EXTRA_CLOBBER)
9937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9938 {
9939 unsigned long __limit;
9940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9941 - return __limit + 1;
9942 + return __limit;
9943 }
9944
9945 static inline void native_clts(void)
9946 @@ -340,12 +340,12 @@ void enable_hlt(void);
9947
9948 void cpu_idle_wait(void);
9949
9950 -extern unsigned long arch_align_stack(unsigned long sp);
9951 +#define arch_align_stack(x) ((x) & ~0xfUL)
9952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9953
9954 void default_idle(void);
9955
9956 -void stop_this_cpu(void *dummy);
9957 +void stop_this_cpu(void *dummy) __noreturn;
9958
9959 /*
9960 * Force strict CPU ordering.
9961 diff -urNp linux-2.6.32.41/arch/x86/include/asm/thread_info.h linux-2.6.32.41/arch/x86/include/asm/thread_info.h
9962 --- linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
9963 +++ linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
9964 @@ -10,6 +10,7 @@
9965 #include <linux/compiler.h>
9966 #include <asm/page.h>
9967 #include <asm/types.h>
9968 +#include <asm/percpu.h>
9969
9970 /*
9971 * low level task data that entry.S needs immediate access to
9972 @@ -24,7 +25,6 @@ struct exec_domain;
9973 #include <asm/atomic.h>
9974
9975 struct thread_info {
9976 - struct task_struct *task; /* main task structure */
9977 struct exec_domain *exec_domain; /* execution domain */
9978 __u32 flags; /* low level flags */
9979 __u32 status; /* thread synchronous flags */
9980 @@ -34,18 +34,12 @@ struct thread_info {
9981 mm_segment_t addr_limit;
9982 struct restart_block restart_block;
9983 void __user *sysenter_return;
9984 -#ifdef CONFIG_X86_32
9985 - unsigned long previous_esp; /* ESP of the previous stack in
9986 - case of nested (IRQ) stacks
9987 - */
9988 - __u8 supervisor_stack[0];
9989 -#endif
9990 + unsigned long lowest_stack;
9991 int uaccess_err;
9992 };
9993
9994 -#define INIT_THREAD_INFO(tsk) \
9995 +#define INIT_THREAD_INFO \
9996 { \
9997 - .task = &tsk, \
9998 .exec_domain = &default_exec_domain, \
9999 .flags = 0, \
10000 .cpu = 0, \
10001 @@ -56,7 +50,7 @@ struct thread_info {
10002 }, \
10003 }
10004
10005 -#define init_thread_info (init_thread_union.thread_info)
10006 +#define init_thread_info (init_thread_union.stack)
10007 #define init_stack (init_thread_union.stack)
10008
10009 #else /* !__ASSEMBLY__ */
10010 @@ -163,6 +157,23 @@ struct thread_info {
10011 #define alloc_thread_info(tsk) \
10012 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10013
10014 +#ifdef __ASSEMBLY__
10015 +/* how to get the thread information struct from ASM */
10016 +#define GET_THREAD_INFO(reg) \
10017 + mov PER_CPU_VAR(current_tinfo), reg
10018 +
10019 +/* use this one if reg already contains %esp */
10020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10021 +#else
10022 +/* how to get the thread information struct from C */
10023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10024 +
10025 +static __always_inline struct thread_info *current_thread_info(void)
10026 +{
10027 + return percpu_read_stable(current_tinfo);
10028 +}
10029 +#endif
10030 +
10031 #ifdef CONFIG_X86_32
10032
10033 #define STACK_WARN (THREAD_SIZE/8)
10034 @@ -173,35 +184,13 @@ struct thread_info {
10035 */
10036 #ifndef __ASSEMBLY__
10037
10038 -
10039 /* how to get the current stack pointer from C */
10040 register unsigned long current_stack_pointer asm("esp") __used;
10041
10042 -/* how to get the thread information struct from C */
10043 -static inline struct thread_info *current_thread_info(void)
10044 -{
10045 - return (struct thread_info *)
10046 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10047 -}
10048 -
10049 -#else /* !__ASSEMBLY__ */
10050 -
10051 -/* how to get the thread information struct from ASM */
10052 -#define GET_THREAD_INFO(reg) \
10053 - movl $-THREAD_SIZE, reg; \
10054 - andl %esp, reg
10055 -
10056 -/* use this one if reg already contains %esp */
10057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10058 - andl $-THREAD_SIZE, reg
10059 -
10060 #endif
10061
10062 #else /* X86_32 */
10063
10064 -#include <asm/percpu.h>
10065 -#define KERNEL_STACK_OFFSET (5*8)
10066 -
10067 /*
10068 * macros/functions for gaining access to the thread information structure
10069 * preempt_count needs to be 1 initially, until the scheduler is functional.
10070 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10071 #ifndef __ASSEMBLY__
10072 DECLARE_PER_CPU(unsigned long, kernel_stack);
10073
10074 -static inline struct thread_info *current_thread_info(void)
10075 -{
10076 - struct thread_info *ti;
10077 - ti = (void *)(percpu_read_stable(kernel_stack) +
10078 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10079 - return ti;
10080 -}
10081 -
10082 -#else /* !__ASSEMBLY__ */
10083 -
10084 -/* how to get the thread information struct from ASM */
10085 -#define GET_THREAD_INFO(reg) \
10086 - movq PER_CPU_VAR(kernel_stack),reg ; \
10087 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10088 -
10089 +/* how to get the current stack pointer from C */
10090 +register unsigned long current_stack_pointer asm("rsp") __used;
10091 #endif
10092
10093 #endif /* !X86_32 */
10094 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10095 extern void free_thread_info(struct thread_info *ti);
10096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10097 #define arch_task_cache_init arch_task_cache_init
10098 +
10099 +#define __HAVE_THREAD_FUNCTIONS
10100 +#define task_thread_info(task) (&(task)->tinfo)
10101 +#define task_stack_page(task) ((task)->stack)
10102 +#define setup_thread_stack(p, org) do {} while (0)
10103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10104 +
10105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10106 +extern struct task_struct *alloc_task_struct(void);
10107 +extern void free_task_struct(struct task_struct *);
10108 +
10109 #endif
10110 #endif /* _ASM_X86_THREAD_INFO_H */
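/*
 * Illustrative sketch, not part of the patch above: on 32-bit, the
 * removed current_thread_info() located struct thread_info by rounding
 * the stack pointer down to the start of its THREAD_SIZE-aligned
 * kernel stack; the patch replaces both the 32-bit and 64-bit variants
 * with the per-cpu current_tinfo pointer.  The arithmetic below
 * reproduces the old rounding trick on a made-up address, assuming an
 * 8 KiB THREAD_SIZE for the example.
 */
#include <stdio.h>

#define EXAMPLE_THREAD_SIZE 8192UL   /* assumption for illustration */

static unsigned long stack_base(unsigned long sp)
{
    return sp & ~(EXAMPLE_THREAD_SIZE - 1);   /* round down to stack start */
}

int main(void)
{
    unsigned long sp = 0xc12345f8UL;   /* arbitrary in-stack address */

    /* prints 0xc1234000: the bottom of the 8 KiB stack holding sp */
    printf("sp=%#lx -> stack base %#lx\n", sp, stack_base(sp));
    return 0;
}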
10111 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h
10112 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10113 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10114 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10115 static __always_inline unsigned long __must_check
10116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10117 {
10118 + pax_track_stack();
10119 +
10120 + if ((long)n < 0)
10121 + return n;
10122 +
10123 if (__builtin_constant_p(n)) {
10124 unsigned long ret;
10125
10126 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10127 return ret;
10128 }
10129 }
10130 + if (!__builtin_constant_p(n))
10131 + check_object_size(from, n, true);
10132 return __copy_to_user_ll(to, from, n);
10133 }
10134
10135 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10136 __copy_to_user(void __user *to, const void *from, unsigned long n)
10137 {
10138 might_fault();
10139 +
10140 return __copy_to_user_inatomic(to, from, n);
10141 }
10142
10143 static __always_inline unsigned long
10144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10145 {
10146 + if ((long)n < 0)
10147 + return n;
10148 +
10149 /* Avoid zeroing the tail if the copy fails..
10150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10151 * but as the zeroing behaviour is only significant when n is not
10152 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10153 __copy_from_user(void *to, const void __user *from, unsigned long n)
10154 {
10155 might_fault();
10156 +
10157 + pax_track_stack();
10158 +
10159 + if ((long)n < 0)
10160 + return n;
10161 +
10162 if (__builtin_constant_p(n)) {
10163 unsigned long ret;
10164
10165 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10166 return ret;
10167 }
10168 }
10169 + if (!__builtin_constant_p(n))
10170 + check_object_size(to, n, false);
10171 return __copy_from_user_ll(to, from, n);
10172 }
10173
10174 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10175 const void __user *from, unsigned long n)
10176 {
10177 might_fault();
10178 +
10179 + if ((long)n < 0)
10180 + return n;
10181 +
10182 if (__builtin_constant_p(n)) {
10183 unsigned long ret;
10184
10185 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10187 unsigned long n)
10188 {
10189 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10190 + if ((long)n < 0)
10191 + return n;
10192 +
10193 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10194 +}
10195 +
10196 +/**
10197 + * copy_to_user: - Copy a block of data into user space.
10198 + * @to: Destination address, in user space.
10199 + * @from: Source address, in kernel space.
10200 + * @n: Number of bytes to copy.
10201 + *
10202 + * Context: User context only. This function may sleep.
10203 + *
10204 + * Copy data from kernel space to user space.
10205 + *
10206 + * Returns number of bytes that could not be copied.
10207 + * On success, this will be zero.
10208 + */
10209 +static __always_inline unsigned long __must_check
10210 +copy_to_user(void __user *to, const void *from, unsigned long n)
10211 +{
10212 + if (access_ok(VERIFY_WRITE, to, n))
10213 + n = __copy_to_user(to, from, n);
10214 + return n;
10215 +}
10216 +
10217 +/**
10218 + * copy_from_user: - Copy a block of data from user space.
10219 + * @to: Destination address, in kernel space.
10220 + * @from: Source address, in user space.
10221 + * @n: Number of bytes to copy.
10222 + *
10223 + * Context: User context only. This function may sleep.
10224 + *
10225 + * Copy data from user space to kernel space.
10226 + *
10227 + * Returns number of bytes that could not be copied.
10228 + * On success, this will be zero.
10229 + *
10230 + * If some data could not be copied, this function will pad the copied
10231 + * data to the requested size using zero bytes.
10232 + */
10233 +static __always_inline unsigned long __must_check
10234 +copy_from_user(void *to, const void __user *from, unsigned long n)
10235 +{
10236 + if (access_ok(VERIFY_READ, from, n))
10237 + n = __copy_from_user(to, from, n);
10238 + else if ((long)n > 0) {
10239 + if (!__builtin_constant_p(n))
10240 + check_object_size(to, n, false);
10241 + memset(to, 0, n);
10242 + }
10243 + return n;
10244 }
10245
10246 -unsigned long __must_check copy_to_user(void __user *to,
10247 - const void *from, unsigned long n);
10248 -unsigned long __must_check copy_from_user(void *to,
10249 - const void __user *from,
10250 - unsigned long n);
10251 long __must_check strncpy_from_user(char *dst, const char __user *src,
10252 long count);
10253 long __must_check __strncpy_from_user(char *dst,
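/*
 * Illustrative sketch, not part of the patch above: the copy helpers
 * in this header gain an "if ((long)n < 0) return n;" guard.  No valid
 * copy length has its top bit set, so viewing the size_t as signed
 * cheaply rejects wildly out-of-range values, such as a negative error
 * code misused as a length.  The standalone check below shows the same
 * test in isolation (two's-complement targets assumed).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool copy_len_sane(size_t n)
{
    /* lengths at or above LONG_MAX + 1 appear negative as a long */
    return (long)n >= 0;
}

int main(void)
{
    size_t bogus = (size_t)-12;   /* e.g. an error code accidentally used as a count */

    printf("4096        -> %s\n", copy_len_sane(4096)  ? "sane" : "rejected");
    printf("(size_t)-12 -> %s\n", copy_len_sane(bogus) ? "sane" : "rejected");
    return 0;
}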
10254 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h
10255 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10256 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10257 @@ -9,6 +9,9 @@
10258 #include <linux/prefetch.h>
10259 #include <linux/lockdep.h>
10260 #include <asm/page.h>
10261 +#include <asm/pgtable.h>
10262 +
10263 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10264
10265 /*
10266 * Copy To/From Userspace
10267 @@ -19,113 +22,203 @@ __must_check unsigned long
10268 copy_user_generic(void *to, const void *from, unsigned len);
10269
10270 __must_check unsigned long
10271 -copy_to_user(void __user *to, const void *from, unsigned len);
10272 -__must_check unsigned long
10273 -copy_from_user(void *to, const void __user *from, unsigned len);
10274 -__must_check unsigned long
10275 copy_in_user(void __user *to, const void __user *from, unsigned len);
10276
10277 static __always_inline __must_check
10278 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10279 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10280 {
10281 - int ret = 0;
10282 + unsigned ret = 0;
10283
10284 might_fault();
10285 - if (!__builtin_constant_p(size))
10286 - return copy_user_generic(dst, (__force void *)src, size);
10287 +
10288 + if ((int)size < 0)
10289 + return size;
10290 +
10291 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10292 + if (!__access_ok(VERIFY_READ, src, size))
10293 + return size;
10294 +#endif
10295 +
10296 + if (!__builtin_constant_p(size)) {
10297 + check_object_size(dst, size, false);
10298 +
10299 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10300 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10301 + src += PAX_USER_SHADOW_BASE;
10302 +#endif
10303 +
10304 + return copy_user_generic(dst, (__force const void *)src, size);
10305 + }
10306 switch (size) {
10307 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10308 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10309 ret, "b", "b", "=q", 1);
10310 return ret;
10311 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10312 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10313 ret, "w", "w", "=r", 2);
10314 return ret;
10315 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10316 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10317 ret, "l", "k", "=r", 4);
10318 return ret;
10319 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10320 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10321 ret, "q", "", "=r", 8);
10322 return ret;
10323 case 10:
10324 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10325 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10326 ret, "q", "", "=r", 10);
10327 if (unlikely(ret))
10328 return ret;
10329 __get_user_asm(*(u16 *)(8 + (char *)dst),
10330 - (u16 __user *)(8 + (char __user *)src),
10331 + (const u16 __user *)(8 + (const char __user *)src),
10332 ret, "w", "w", "=r", 2);
10333 return ret;
10334 case 16:
10335 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10336 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10337 ret, "q", "", "=r", 16);
10338 if (unlikely(ret))
10339 return ret;
10340 __get_user_asm(*(u64 *)(8 + (char *)dst),
10341 - (u64 __user *)(8 + (char __user *)src),
10342 + (const u64 __user *)(8 + (const char __user *)src),
10343 ret, "q", "", "=r", 8);
10344 return ret;
10345 default:
10346 - return copy_user_generic(dst, (__force void *)src, size);
10347 +
10348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10349 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10350 + src += PAX_USER_SHADOW_BASE;
10351 +#endif
10352 +
10353 + return copy_user_generic(dst, (__force const void *)src, size);
10354 }
10355 }
10356
10357 static __always_inline __must_check
10358 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10359 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10360 {
10361 - int ret = 0;
10362 + unsigned ret = 0;
10363
10364 might_fault();
10365 - if (!__builtin_constant_p(size))
10366 +
10367 + pax_track_stack();
10368 +
10369 + if ((int)size < 0)
10370 + return size;
10371 +
10372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10373 + if (!__access_ok(VERIFY_WRITE, dst, size))
10374 + return size;
10375 +#endif
10376 +
10377 + if (!__builtin_constant_p(size)) {
10378 + check_object_size(src, size, true);
10379 +
10380 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10381 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10382 + dst += PAX_USER_SHADOW_BASE;
10383 +#endif
10384 +
10385 return copy_user_generic((__force void *)dst, src, size);
10386 + }
10387 switch (size) {
10388 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10389 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10390 ret, "b", "b", "iq", 1);
10391 return ret;
10392 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10393 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10394 ret, "w", "w", "ir", 2);
10395 return ret;
10396 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10397 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10398 ret, "l", "k", "ir", 4);
10399 return ret;
10400 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10401 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10402 ret, "q", "", "er", 8);
10403 return ret;
10404 case 10:
10405 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10406 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10407 ret, "q", "", "er", 10);
10408 if (unlikely(ret))
10409 return ret;
10410 asm("":::"memory");
10411 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10412 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10413 ret, "w", "w", "ir", 2);
10414 return ret;
10415 case 16:
10416 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10417 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10418 ret, "q", "", "er", 16);
10419 if (unlikely(ret))
10420 return ret;
10421 asm("":::"memory");
10422 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10423 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10424 ret, "q", "", "er", 8);
10425 return ret;
10426 default:
10427 +
10428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10429 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10430 + dst += PAX_USER_SHADOW_BASE;
10431 +#endif
10432 +
10433 return copy_user_generic((__force void *)dst, src, size);
10434 }
10435 }
10436
10437 static __always_inline __must_check
10438 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10439 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10440 +{
10441 + if (access_ok(VERIFY_WRITE, to, len))
10442 + len = __copy_to_user(to, from, len);
10443 + return len;
10444 +}
10445 +
10446 +static __always_inline __must_check
10447 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10448 +{
10449 + if ((int)len < 0)
10450 + return len;
10451 +
10452 + if (access_ok(VERIFY_READ, from, len))
10453 + len = __copy_from_user(to, from, len);
10454 + else if ((int)len > 0) {
10455 + if (!__builtin_constant_p(len))
10456 + check_object_size(to, len, false);
10457 + memset(to, 0, len);
10458 + }
10459 + return len;
10460 +}
10461 +
10462 +static __always_inline __must_check
10463 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10464 {
10465 - int ret = 0;
10466 + unsigned ret = 0;
10467
10468 might_fault();
10469 - if (!__builtin_constant_p(size))
10470 +
10471 + pax_track_stack();
10472 +
10473 + if ((int)size < 0)
10474 + return size;
10475 +
10476 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10477 + if (!__access_ok(VERIFY_READ, src, size))
10478 + return size;
10479 + if (!__access_ok(VERIFY_WRITE, dst, size))
10480 + return size;
10481 +#endif
10482 +
10483 + if (!__builtin_constant_p(size)) {
10484 +
10485 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10486 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10487 + src += PAX_USER_SHADOW_BASE;
10488 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10489 + dst += PAX_USER_SHADOW_BASE;
10490 +#endif
10491 +
10492 return copy_user_generic((__force void *)dst,
10493 - (__force void *)src, size);
10494 + (__force const void *)src, size);
10495 + }
10496 switch (size) {
10497 case 1: {
10498 u8 tmp;
10499 - __get_user_asm(tmp, (u8 __user *)src,
10500 + __get_user_asm(tmp, (const u8 __user *)src,
10501 ret, "b", "b", "=q", 1);
10502 if (likely(!ret))
10503 __put_user_asm(tmp, (u8 __user *)dst,
10504 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10505 }
10506 case 2: {
10507 u16 tmp;
10508 - __get_user_asm(tmp, (u16 __user *)src,
10509 + __get_user_asm(tmp, (const u16 __user *)src,
10510 ret, "w", "w", "=r", 2);
10511 if (likely(!ret))
10512 __put_user_asm(tmp, (u16 __user *)dst,
10513 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10514
10515 case 4: {
10516 u32 tmp;
10517 - __get_user_asm(tmp, (u32 __user *)src,
10518 + __get_user_asm(tmp, (const u32 __user *)src,
10519 ret, "l", "k", "=r", 4);
10520 if (likely(!ret))
10521 __put_user_asm(tmp, (u32 __user *)dst,
10522 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10523 }
10524 case 8: {
10525 u64 tmp;
10526 - __get_user_asm(tmp, (u64 __user *)src,
10527 + __get_user_asm(tmp, (const u64 __user *)src,
10528 ret, "q", "", "=r", 8);
10529 if (likely(!ret))
10530 __put_user_asm(tmp, (u64 __user *)dst,
10531 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10532 return ret;
10533 }
10534 default:
10535 +
10536 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10537 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10538 + src += PAX_USER_SHADOW_BASE;
10539 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10540 + dst += PAX_USER_SHADOW_BASE;
10541 +#endif
10542 +
10543 return copy_user_generic((__force void *)dst,
10544 - (__force void *)src, size);
10545 + (__force const void *)src, size);
10546 }
10547 }
10548
10549 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10550 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10551 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10552
10553 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10554 - unsigned size);
10555 +static __must_check __always_inline unsigned long
10556 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10557 +{
10558 + pax_track_stack();
10559 +
10560 + if ((int)size < 0)
10561 + return size;
10562
10563 -static __must_check __always_inline int
10564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10565 + if (!__access_ok(VERIFY_READ, src, size))
10566 + return size;
10567 +
10568 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10569 + src += PAX_USER_SHADOW_BASE;
10570 +#endif
10571 +
10572 + return copy_user_generic(dst, (__force const void *)src, size);
10573 +}
10574 +
10575 +static __must_check __always_inline unsigned long
10576 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10577 {
10578 + if ((int)size < 0)
10579 + return size;
10580 +
10581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10582 + if (!__access_ok(VERIFY_WRITE, dst, size))
10583 + return size;
10584 +
10585 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10586 + dst += PAX_USER_SHADOW_BASE;
10587 +#endif
10588 +
10589 return copy_user_generic((__force void *)dst, src, size);
10590 }
10591
10592 -extern long __copy_user_nocache(void *dst, const void __user *src,
10593 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10594 unsigned size, int zerorest);
10595
10596 -static inline int
10597 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10598 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10599 {
10600 might_sleep();
10601 +
10602 + if ((int)size < 0)
10603 + return size;
10604 +
10605 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10606 + if (!__access_ok(VERIFY_READ, src, size))
10607 + return size;
10608 +#endif
10609 +
10610 return __copy_user_nocache(dst, src, size, 1);
10611 }
10612
10613 -static inline int
10614 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10615 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10616 unsigned size)
10617 {
10618 + if ((int)size < 0)
10619 + return size;
10620 +
10621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10622 + if (!__access_ok(VERIFY_READ, src, size))
10623 + return size;
10624 +#endif
10625 +
10626 return __copy_user_nocache(dst, src, size, 0);
10627 }
10628
10629 -unsigned long
10630 +extern unsigned long
10631 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10632
10633 #endif /* _ASM_X86_UACCESS_64_H */
10634 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess.h linux-2.6.32.41/arch/x86/include/asm/uaccess.h
10635 --- linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
10636 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
10637 @@ -8,12 +8,15 @@
10638 #include <linux/thread_info.h>
10639 #include <linux/prefetch.h>
10640 #include <linux/string.h>
10641 +#include <linux/sched.h>
10642 #include <asm/asm.h>
10643 #include <asm/page.h>
10644
10645 #define VERIFY_READ 0
10646 #define VERIFY_WRITE 1
10647
10648 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10649 +
10650 /*
10651 * The fs value determines whether argument validity checking should be
10652 * performed or not. If get_fs() == USER_DS, checking is performed, with
10653 @@ -29,7 +32,12 @@
10654
10655 #define get_ds() (KERNEL_DS)
10656 #define get_fs() (current_thread_info()->addr_limit)
10657 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10658 +void __set_fs(mm_segment_t x);
10659 +void set_fs(mm_segment_t x);
10660 +#else
10661 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10662 +#endif
10663
10664 #define segment_eq(a, b) ((a).seg == (b).seg)
10665
10666 @@ -77,7 +85,33 @@
10667 * checks that the pointer is in the user space range - after calling
10668 * this function, memory access functions may still return -EFAULT.
10669 */
10670 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10671 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10672 +#define access_ok(type, addr, size) \
10673 +({ \
10674 + long __size = size; \
10675 + unsigned long __addr = (unsigned long)addr; \
10676 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10677 + unsigned long __end_ao = __addr + __size - 1; \
10678 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10679 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10680 + while(__addr_ao <= __end_ao) { \
10681 + char __c_ao; \
10682 + __addr_ao += PAGE_SIZE; \
10683 + if (__size > PAGE_SIZE) \
10684 + cond_resched(); \
10685 + if (__get_user(__c_ao, (char __user *)__addr)) \
10686 + break; \
10687 + if (type != VERIFY_WRITE) { \
10688 + __addr = __addr_ao; \
10689 + continue; \
10690 + } \
10691 + if (__put_user(__c_ao, (char __user *)__addr)) \
10692 + break; \
10693 + __addr = __addr_ao; \
10694 + } \
10695 + } \
10696 + __ret_ao; \
10697 +})
10698
10699 /*
10700 * The exception table consists of pairs of addresses: the first is the
10701 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10702 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10703 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10704
10705 -
10706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10707 +#define __copyuser_seg "gs;"
10708 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10709 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10710 +#else
10711 +#define __copyuser_seg
10712 +#define __COPYUSER_SET_ES
10713 +#define __COPYUSER_RESTORE_ES
10714 +#endif
10715
10716 #ifdef CONFIG_X86_32
10717 #define __put_user_asm_u64(x, addr, err, errret) \
10718 - asm volatile("1: movl %%eax,0(%2)\n" \
10719 - "2: movl %%edx,4(%2)\n" \
10720 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10721 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10722 "3:\n" \
10723 ".section .fixup,\"ax\"\n" \
10724 "4: movl %3,%0\n" \
10725 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10726 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10727
10728 #define __put_user_asm_ex_u64(x, addr) \
10729 - asm volatile("1: movl %%eax,0(%1)\n" \
10730 - "2: movl %%edx,4(%1)\n" \
10731 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10732 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10733 "3:\n" \
10734 _ASM_EXTABLE(1b, 2b - 1b) \
10735 _ASM_EXTABLE(2b, 3b - 2b) \
10736 @@ -374,7 +416,7 @@ do { \
10737 } while (0)
10738
10739 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10740 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10741 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10742 "2:\n" \
10743 ".section .fixup,\"ax\"\n" \
10744 "3: mov %3,%0\n" \
10745 @@ -382,7 +424,7 @@ do { \
10746 " jmp 2b\n" \
10747 ".previous\n" \
10748 _ASM_EXTABLE(1b, 3b) \
10749 - : "=r" (err), ltype(x) \
10750 + : "=r" (err), ltype (x) \
10751 : "m" (__m(addr)), "i" (errret), "0" (err))
10752
10753 #define __get_user_size_ex(x, ptr, size) \
10754 @@ -407,7 +449,7 @@ do { \
10755 } while (0)
10756
10757 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10758 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10759 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10760 "2:\n" \
10761 _ASM_EXTABLE(1b, 2b - 1b) \
10762 : ltype(x) : "m" (__m(addr)))
10763 @@ -424,13 +466,24 @@ do { \
10764 int __gu_err; \
10765 unsigned long __gu_val; \
10766 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10767 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10768 + (x) = (__typeof__(*(ptr)))__gu_val; \
10769 __gu_err; \
10770 })
10771
10772 /* FIXME: this hack is definitely wrong -AK */
10773 struct __large_struct { unsigned long buf[100]; };
10774 -#define __m(x) (*(struct __large_struct __user *)(x))
10775 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10776 +#define ____m(x) \
10777 +({ \
10778 + unsigned long ____x = (unsigned long)(x); \
10779 + if (____x < PAX_USER_SHADOW_BASE) \
10780 + ____x += PAX_USER_SHADOW_BASE; \
10781 + (void __user *)____x; \
10782 +})
10783 +#else
10784 +#define ____m(x) (x)
10785 +#endif
10786 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10787
10788 /*
10789 * Tell gcc we read from memory instead of writing: this is because
10790 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10791 * aliasing issues.
10792 */
10793 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10794 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10795 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10796 "2:\n" \
10797 ".section .fixup,\"ax\"\n" \
10798 "3: mov %3,%0\n" \
10799 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10800 ".previous\n" \
10801 _ASM_EXTABLE(1b, 3b) \
10802 : "=r"(err) \
10803 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10804 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10805
10806 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10807 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10808 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10809 "2:\n" \
10810 _ASM_EXTABLE(1b, 2b - 1b) \
10811 : : ltype(x), "m" (__m(addr)))
10812 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10813 * On error, the variable @x is set to zero.
10814 */
10815
10816 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10817 +#define __get_user(x, ptr) get_user((x), (ptr))
10818 +#else
10819 #define __get_user(x, ptr) \
10820 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10821 +#endif
10822
10823 /**
10824 * __put_user: - Write a simple value into user space, with less checking.
10825 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10826 * Returns zero on success, or -EFAULT on error.
10827 */
10828
10829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10830 +#define __put_user(x, ptr) put_user((x), (ptr))
10831 +#else
10832 #define __put_user(x, ptr) \
10833 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10834 +#endif
10835
10836 #define __get_user_unaligned __get_user
10837 #define __put_user_unaligned __put_user
10838 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10839 #define get_user_ex(x, ptr) do { \
10840 unsigned long __gue_val; \
10841 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10842 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10843 + (x) = (__typeof__(*(ptr)))__gue_val; \
10844 } while (0)
10845
10846 #ifdef CONFIG_X86_WP_WORKS_OK
10847 @@ -567,6 +628,7 @@ extern struct movsl_mask {
10848
10849 #define ARCH_HAS_NOCACHE_UACCESS 1
10850
10851 +#define ARCH_HAS_SORT_EXTABLE
10852 #ifdef CONFIG_X86_32
10853 # include "uaccess_32.h"
10854 #else
10855 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vgtod.h linux-2.6.32.41/arch/x86/include/asm/vgtod.h
10856 --- linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10857 +++ linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10858 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10859 int sysctl_enabled;
10860 struct timezone sys_tz;
10861 struct { /* extract of a clocksource struct */
10862 + char name[8];
10863 cycle_t (*vread)(void);
10864 cycle_t cycle_last;
10865 cycle_t mask;
10866 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vmi.h linux-2.6.32.41/arch/x86/include/asm/vmi.h
10867 --- linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
10868 +++ linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
10869 @@ -191,6 +191,7 @@ struct vrom_header {
10870 u8 reserved[96]; /* Reserved for headers */
10871 char vmi_init[8]; /* VMI_Init jump point */
10872 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
10873 + char rom_data[8048]; /* rest of the option ROM */
10874 } __attribute__((packed));
10875
10876 struct pnp_header {
10877 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vsyscall.h linux-2.6.32.41/arch/x86/include/asm/vsyscall.h
10878 --- linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
10879 +++ linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
10880 @@ -15,9 +15,10 @@ enum vsyscall_num {
10881
10882 #ifdef __KERNEL__
10883 #include <linux/seqlock.h>
10884 +#include <linux/getcpu.h>
10885 +#include <linux/time.h>
10886
10887 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
10888 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
10889
10890 /* Definitions for CONFIG_GENERIC_TIME definitions */
10891 #define __section_vsyscall_gtod_data __attribute__ \
10892 @@ -31,7 +32,6 @@ enum vsyscall_num {
10893 #define VGETCPU_LSL 2
10894
10895 extern int __vgetcpu_mode;
10896 -extern volatile unsigned long __jiffies;
10897
10898 /* kernel space (writeable) */
10899 extern int vgetcpu_mode;
10900 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
10901
10902 extern void map_vsyscall(void);
10903
10904 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
10905 +extern time_t vtime(time_t *t);
10906 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
10907 #endif /* __KERNEL__ */
10908
10909 #endif /* _ASM_X86_VSYSCALL_H */
10910 diff -urNp linux-2.6.32.41/arch/x86/include/asm/xsave.h linux-2.6.32.41/arch/x86/include/asm/xsave.h
10911 --- linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
10912 +++ linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
10913 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
10914 static inline int xsave_user(struct xsave_struct __user *buf)
10915 {
10916 int err;
10917 +
10918 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10919 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10920 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10921 +#endif
10922 +
10923 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
10924 "2:\n"
10925 ".section .fixup,\"ax\"\n"
10926 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
10927 u32 lmask = mask;
10928 u32 hmask = mask >> 32;
10929
10930 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10931 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10932 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10933 +#endif
10934 +
10935 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10936 "2:\n"
10937 ".section .fixup,\"ax\"\n"
10938 diff -urNp linux-2.6.32.41/arch/x86/Kconfig linux-2.6.32.41/arch/x86/Kconfig
10939 --- linux-2.6.32.41/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
10940 +++ linux-2.6.32.41/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
10941 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
10942
10943 config X86_32_LAZY_GS
10944 def_bool y
10945 - depends on X86_32 && !CC_STACKPROTECTOR
10946 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10947
10948 config KTIME_SCALAR
10949 def_bool X86_32
10950 @@ -1008,7 +1008,7 @@ choice
10951
10952 config NOHIGHMEM
10953 bool "off"
10954 - depends on !X86_NUMAQ
10955 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10956 ---help---
10957 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10958 However, the address space of 32-bit x86 processors is only 4
10959 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
10960
10961 config HIGHMEM4G
10962 bool "4GB"
10963 - depends on !X86_NUMAQ
10964 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10965 ---help---
10966 Select this if you have a 32-bit processor and between 1 and 4
10967 gigabytes of physical RAM.
10968 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
10969 hex
10970 default 0xB0000000 if VMSPLIT_3G_OPT
10971 default 0x80000000 if VMSPLIT_2G
10972 - default 0x78000000 if VMSPLIT_2G_OPT
10973 + default 0x70000000 if VMSPLIT_2G_OPT
10974 default 0x40000000 if VMSPLIT_1G
10975 default 0xC0000000
10976 depends on X86_32
10977 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
10978
10979 config EFI
10980 bool "EFI runtime service support"
10981 - depends on ACPI
10982 + depends on ACPI && !PAX_KERNEXEC
10983 ---help---
10984 This enables the kernel to use EFI runtime services that are
10985 available (such as the EFI variable services).
10986 @@ -1460,6 +1460,7 @@ config SECCOMP
10987
10988 config CC_STACKPROTECTOR
10989 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10990 + depends on X86_64 || !PAX_MEMORY_UDEREF
10991 ---help---
10992 This option turns on the -fstack-protector GCC feature. This
10993 feature puts, at the beginning of functions, a canary value on
10994 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
10995 config PHYSICAL_START
10996 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
10997 default "0x1000000"
10998 + range 0x400000 0x40000000
10999 ---help---
11000 This gives the physical address where the kernel is loaded.
11001
11002 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11003 hex
11004 prompt "Alignment value to which kernel should be aligned" if X86_32
11005 default "0x1000000"
11006 + range 0x400000 0x1000000 if PAX_KERNEXEC
11007 range 0x2000 0x1000000
11008 ---help---
11009 This value puts the alignment restrictions on physical address
11010 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11011 Say N if you want to disable CPU hotplug.
11012
11013 config COMPAT_VDSO
11014 - def_bool y
11015 + def_bool n
11016 prompt "Compat VDSO support"
11017 depends on X86_32 || IA32_EMULATION
11018 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11019 ---help---
11020 Map the 32-bit VDSO to the predictable old-style address too.
11021 ---help---
11022 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.cpu linux-2.6.32.41/arch/x86/Kconfig.cpu
11023 --- linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11024 +++ linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11025 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11026
11027 config X86_F00F_BUG
11028 def_bool y
11029 - depends on M586MMX || M586TSC || M586 || M486 || M386
11030 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11031
11032 config X86_WP_WORKS_OK
11033 def_bool y
11034 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11035
11036 config X86_ALIGNMENT_16
11037 def_bool y
11038 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11039 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11040
11041 config X86_INTEL_USERCOPY
11042 def_bool y
11043 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11044 # generates cmov.
11045 config X86_CMOV
11046 def_bool y
11047 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11048 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11049
11050 config X86_MINIMUM_CPU_FAMILY
11051 int
11052 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.debug linux-2.6.32.41/arch/x86/Kconfig.debug
11053 --- linux-2.6.32.41/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11054 +++ linux-2.6.32.41/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11055 @@ -99,7 +99,7 @@ config X86_PTDUMP
11056 config DEBUG_RODATA
11057 bool "Write protect kernel read-only data structures"
11058 default y
11059 - depends on DEBUG_KERNEL
11060 + depends on DEBUG_KERNEL && BROKEN
11061 ---help---
11062 Mark the kernel read-only data as write-protected in the pagetables,
11063 in order to catch accidental (and incorrect) writes to such const
11064 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S
11065 --- linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11066 +++ linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11067 @@ -104,7 +104,7 @@ _start:
11068 movl %eax, %ecx
11069 orl %edx, %ecx
11070 jz 1f
11071 - movl $0xc0000080, %ecx
11072 + mov $MSR_EFER, %ecx
11073 wrmsr
11074 1:
11075
11076 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c
11077 --- linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11078 +++ linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11079 @@ -11,11 +11,12 @@
11080 #include <linux/cpumask.h>
11081 #include <asm/segment.h>
11082 #include <asm/desc.h>
11083 +#include <asm/e820.h>
11084
11085 #include "realmode/wakeup.h"
11086 #include "sleep.h"
11087
11088 -unsigned long acpi_wakeup_address;
11089 +unsigned long acpi_wakeup_address = 0x2000;
11090 unsigned long acpi_realmode_flags;
11091
11092 /* address in low memory of the wakeup routine. */
11093 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11094 header->trampoline_segment = setup_trampoline() >> 4;
11095 #ifdef CONFIG_SMP
11096 stack_start.sp = temp_stack + sizeof(temp_stack);
11097 +
11098 + pax_open_kernel();
11099 early_gdt_descr.address =
11100 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11101 + pax_close_kernel();
11102 +
11103 initial_gs = per_cpu_offset(smp_processor_id());
11104 #endif
11105 initial_code = (unsigned long)wakeup_long64;
11106 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11107 return;
11108 }
11109
11110 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11111 -
11112 - if (!acpi_realmode) {
11113 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11114 - return;
11115 - }
11116 -
11117 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11118 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11119 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11120 }
11121
11122
11123 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S
11124 --- linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11125 +++ linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11126 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11127 # and restore the stack ... but you need gdt for this to work
11128 movl saved_context_esp, %esp
11129
11130 - movl %cs:saved_magic, %eax
11131 - cmpl $0x12345678, %eax
11132 + cmpl $0x12345678, saved_magic
11133 jne bogus_magic
11134
11135 # jump to place where we left off
11136 - movl saved_eip, %eax
11137 - jmp *%eax
11138 + jmp *(saved_eip)
11139
11140 bogus_magic:
11141 jmp bogus_magic
11142 diff -urNp linux-2.6.32.41/arch/x86/kernel/alternative.c linux-2.6.32.41/arch/x86/kernel/alternative.c
11143 --- linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11144 +++ linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11145 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11146
11147 BUG_ON(p->len > MAX_PATCH_LEN);
11148 /* prep the buffer with the original instructions */
11149 - memcpy(insnbuf, p->instr, p->len);
11150 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11151 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11152 (unsigned long)p->instr, p->len);
11153
11154 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11155 if (smp_alt_once)
11156 free_init_pages("SMP alternatives",
11157 (unsigned long)__smp_locks,
11158 - (unsigned long)__smp_locks_end);
11159 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11160
11161 restart_nmi();
11162 }
11163 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11164 * instructions. And on the local CPU you need to be protected again NMI or MCE
11165 * handlers seeing an inconsistent instruction while you patch.
11166 */
11167 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11168 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11169 size_t len)
11170 {
11171 unsigned long flags;
11172 local_irq_save(flags);
11173 - memcpy(addr, opcode, len);
11174 +
11175 + pax_open_kernel();
11176 + memcpy(ktla_ktva(addr), opcode, len);
11177 sync_core();
11178 + pax_close_kernel();
11179 +
11180 local_irq_restore(flags);
11181 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11182 that causes hangs on some VIA CPUs. */
11183 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11184 */
11185 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11186 {
11187 - unsigned long flags;
11188 - char *vaddr;
11189 + unsigned char *vaddr = ktla_ktva(addr);
11190 struct page *pages[2];
11191 - int i;
11192 + size_t i;
11193
11194 if (!core_kernel_text((unsigned long)addr)) {
11195 - pages[0] = vmalloc_to_page(addr);
11196 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11197 + pages[0] = vmalloc_to_page(vaddr);
11198 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11199 } else {
11200 - pages[0] = virt_to_page(addr);
11201 + pages[0] = virt_to_page(vaddr);
11202 WARN_ON(!PageReserved(pages[0]));
11203 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11204 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11205 }
11206 BUG_ON(!pages[0]);
11207 - local_irq_save(flags);
11208 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11209 - if (pages[1])
11210 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11211 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11212 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11213 - clear_fixmap(FIX_TEXT_POKE0);
11214 - if (pages[1])
11215 - clear_fixmap(FIX_TEXT_POKE1);
11216 - local_flush_tlb();
11217 - sync_core();
11218 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11219 - that causes hangs on some VIA CPUs. */
11220 + text_poke_early(addr, opcode, len);
11221 for (i = 0; i < len; i++)
11222 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11223 - local_irq_restore(flags);
11224 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11225 return addr;
11226 }
11227 diff -urNp linux-2.6.32.41/arch/x86/kernel/amd_iommu.c linux-2.6.32.41/arch/x86/kernel/amd_iommu.c
11228 --- linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11229 +++ linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11230 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11231 }
11232 }
11233
11234 -static struct dma_map_ops amd_iommu_dma_ops = {
11235 +static const struct dma_map_ops amd_iommu_dma_ops = {
11236 .alloc_coherent = alloc_coherent,
11237 .free_coherent = free_coherent,
11238 .map_page = map_page,
11239 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/apic.c linux-2.6.32.41/arch/x86/kernel/apic/apic.c
11240 --- linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11241 +++ linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11242 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11243 apic_write(APIC_ESR, 0);
11244 v1 = apic_read(APIC_ESR);
11245 ack_APIC_irq();
11246 - atomic_inc(&irq_err_count);
11247 + atomic_inc_unchecked(&irq_err_count);
11248
11249 /*
11250 * Here is what the APIC error bits mean:
11251 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11252 u16 *bios_cpu_apicid;
11253 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11254
11255 + pax_track_stack();
11256 +
11257 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11258 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11259
11260 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c
11261 --- linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11262 +++ linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11263 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11264 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11265 GFP_ATOMIC);
11266 if (!ioapic_entries)
11267 - return 0;
11268 + return NULL;
11269
11270 for (apic = 0; apic < nr_ioapics; apic++) {
11271 ioapic_entries[apic] =
11272 @@ -733,7 +733,7 @@ nomem:
11273 kfree(ioapic_entries[apic]);
11274 kfree(ioapic_entries);
11275
11276 - return 0;
11277 + return NULL;
11278 }
11279
11280 /*
11281 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11282 }
11283 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11284
11285 -void lock_vector_lock(void)
11286 +void lock_vector_lock(void) __acquires(vector_lock)
11287 {
11288 /* Used to the online set of cpus does not change
11289 * during assign_irq_vector.
11290 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11291 spin_lock(&vector_lock);
11292 }
11293
11294 -void unlock_vector_lock(void)
11295 +void unlock_vector_lock(void) __releases(vector_lock)
11296 {
11297 spin_unlock(&vector_lock);
11298 }
11299 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11300 ack_APIC_irq();
11301 }
11302
11303 -atomic_t irq_mis_count;
11304 +atomic_unchecked_t irq_mis_count;
11305
11306 static void ack_apic_level(unsigned int irq)
11307 {
11308 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11309
11310 /* Tail end of version 0x11 I/O APIC bug workaround */
11311 if (!(v & (1 << (i & 0x1f)))) {
11312 - atomic_inc(&irq_mis_count);
11313 + atomic_inc_unchecked(&irq_mis_count);
11314 spin_lock(&ioapic_lock);
11315 __mask_and_edge_IO_APIC_irq(cfg);
11316 __unmask_and_level_IO_APIC_irq(cfg);
11317 diff -urNp linux-2.6.32.41/arch/x86/kernel/apm_32.c linux-2.6.32.41/arch/x86/kernel/apm_32.c
11318 --- linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11319 +++ linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11320 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11321 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11322 * even though they are called in protected mode.
11323 */
11324 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11325 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11326 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11327
11328 static const char driver_version[] = "1.16ac"; /* no spaces */
11329 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11330 BUG_ON(cpu != 0);
11331 gdt = get_cpu_gdt_table(cpu);
11332 save_desc_40 = gdt[0x40 / 8];
11333 +
11334 + pax_open_kernel();
11335 gdt[0x40 / 8] = bad_bios_desc;
11336 + pax_close_kernel();
11337
11338 apm_irq_save(flags);
11339 APM_DO_SAVE_SEGS;
11340 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11341 &call->esi);
11342 APM_DO_RESTORE_SEGS;
11343 apm_irq_restore(flags);
11344 +
11345 + pax_open_kernel();
11346 gdt[0x40 / 8] = save_desc_40;
11347 + pax_close_kernel();
11348 +
11349 put_cpu();
11350
11351 return call->eax & 0xff;
11352 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11353 BUG_ON(cpu != 0);
11354 gdt = get_cpu_gdt_table(cpu);
11355 save_desc_40 = gdt[0x40 / 8];
11356 +
11357 + pax_open_kernel();
11358 gdt[0x40 / 8] = bad_bios_desc;
11359 + pax_close_kernel();
11360
11361 apm_irq_save(flags);
11362 APM_DO_SAVE_SEGS;
11363 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11364 &call->eax);
11365 APM_DO_RESTORE_SEGS;
11366 apm_irq_restore(flags);
11367 +
11368 + pax_open_kernel();
11369 gdt[0x40 / 8] = save_desc_40;
11370 + pax_close_kernel();
11371 +
11372 put_cpu();
11373 return error;
11374 }
11375 @@ -975,7 +989,7 @@ recalc:
11376
11377 static void apm_power_off(void)
11378 {
11379 - unsigned char po_bios_call[] = {
11380 + const unsigned char po_bios_call[] = {
11381 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11382 0x8e, 0xd0, /* movw ax,ss */
11383 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11384 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11385 * code to that CPU.
11386 */
11387 gdt = get_cpu_gdt_table(0);
11388 +
11389 + pax_open_kernel();
11390 set_desc_base(&gdt[APM_CS >> 3],
11391 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11392 set_desc_base(&gdt[APM_CS_16 >> 3],
11393 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11394 set_desc_base(&gdt[APM_DS >> 3],
11395 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11396 + pax_close_kernel();
11397
11398 proc_create("apm", 0, NULL, &apm_file_ops);
11399
11400 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c
11401 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11402 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11403 @@ -51,7 +51,6 @@ void foo(void)
11404 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11405 BLANK();
11406
11407 - OFFSET(TI_task, thread_info, task);
11408 OFFSET(TI_exec_domain, thread_info, exec_domain);
11409 OFFSET(TI_flags, thread_info, flags);
11410 OFFSET(TI_status, thread_info, status);
11411 @@ -60,6 +59,8 @@ void foo(void)
11412 OFFSET(TI_restart_block, thread_info, restart_block);
11413 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11414 OFFSET(TI_cpu, thread_info, cpu);
11415 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11416 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11417 BLANK();
11418
11419 OFFSET(GDS_size, desc_ptr, size);
11420 @@ -99,6 +100,7 @@ void foo(void)
11421
11422 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11423 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11424 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11425 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11426 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11427 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11428 @@ -115,6 +117,11 @@ void foo(void)
11429 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11430 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11431 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11432 +
11433 +#ifdef CONFIG_PAX_KERNEXEC
11434 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11435 +#endif
11436 +
11437 #endif
11438
11439 #ifdef CONFIG_XEN
11440 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c
11441 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11442 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11443 @@ -44,6 +44,8 @@ int main(void)
11444 ENTRY(addr_limit);
11445 ENTRY(preempt_count);
11446 ENTRY(status);
11447 + ENTRY(lowest_stack);
11448 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11449 #ifdef CONFIG_IA32_EMULATION
11450 ENTRY(sysenter_return);
11451 #endif
11452 @@ -63,6 +65,18 @@ int main(void)
11453 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11454 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11455 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11456 +
11457 +#ifdef CONFIG_PAX_KERNEXEC
11458 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11459 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11460 +#endif
11461 +
11462 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11463 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11464 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11465 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11466 +#endif
11467 +
11468 #endif
11469
11470
11471 @@ -115,6 +129,7 @@ int main(void)
11472 ENTRY(cr8);
11473 BLANK();
11474 #undef ENTRY
11475 + DEFINE(TSS_size, sizeof(struct tss_struct));
11476 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11477 BLANK();
11478 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11479 @@ -130,6 +145,7 @@ int main(void)
11480
11481 BLANK();
11482 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11483 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11484 #ifdef CONFIG_XEN
11485 BLANK();
11486 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11487 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/amd.c
11488 --- linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:56:59.000000000 -0400
11489 +++ linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:57:13.000000000 -0400
11490 @@ -596,7 +596,7 @@ static unsigned int __cpuinit amd_size_c
11491 unsigned int size)
11492 {
11493 /* AMD errata T13 (order #21922) */
11494 - if ((c->x86 == 6)) {
11495 + if (c->x86 == 6) {
11496 /* Duron Rev A0 */
11497 if (c->x86_model == 3 && c->x86_mask == 0)
11498 size = 64;
11499 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/common.c linux-2.6.32.41/arch/x86/kernel/cpu/common.c
11500 --- linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11501 +++ linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11502 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11503
11504 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11505
11506 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11507 -#ifdef CONFIG_X86_64
11508 - /*
11509 - * We need valid kernel segments for data and code in long mode too
11510 - * IRET will check the segment types kkeil 2000/10/28
11511 - * Also sysret mandates a special GDT layout
11512 - *
11513 - * TLS descriptors are currently at a different place compared to i386.
11514 - * Hopefully nobody expects them at a fixed place (Wine?)
11515 - */
11516 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11517 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11518 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11519 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11520 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11521 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11522 -#else
11523 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11524 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11525 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11526 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11527 - /*
11528 - * Segments used for calling PnP BIOS have byte granularity.
11529 - * They code segments and data segments have fixed 64k limits,
11530 - * the transfer segment sizes are set at run time.
11531 - */
11532 - /* 32-bit code */
11533 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11534 - /* 16-bit code */
11535 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11536 - /* 16-bit data */
11537 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11538 - /* 16-bit data */
11539 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11540 - /* 16-bit data */
11541 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11542 - /*
11543 - * The APM segments have byte granularity and their bases
11544 - * are set at run time. All have 64k limits.
11545 - */
11546 - /* 32-bit code */
11547 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11548 - /* 16-bit code */
11549 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11550 - /* data */
11551 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11552 -
11553 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11554 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11555 - GDT_STACK_CANARY_INIT
11556 -#endif
11557 -} };
11558 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11559 -
11560 static int __init x86_xsave_setup(char *s)
11561 {
11562 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11563 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11564 {
11565 struct desc_ptr gdt_descr;
11566
11567 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11568 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11569 gdt_descr.size = GDT_SIZE - 1;
11570 load_gdt(&gdt_descr);
11571 /* Reload the per-cpu base */
11572 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11573 /* Filter out anything that depends on CPUID levels we don't have */
11574 filter_cpuid_features(c, true);
11575
11576 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11577 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11578 +#endif
11579 +
11580 /* If the model name is still unset, do table lookup. */
11581 if (!c->x86_model_id[0]) {
11582 const char *p;
11583 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11584 }
11585 __setup("clearcpuid=", setup_disablecpuid);
11586
11587 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11588 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11589 +
11590 #ifdef CONFIG_X86_64
11591 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11592
11593 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11594 EXPORT_PER_CPU_SYMBOL(current_task);
11595
11596 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11597 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11598 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11599 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11600
11601 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11602 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11603 {
11604 memset(regs, 0, sizeof(struct pt_regs));
11605 regs->fs = __KERNEL_PERCPU;
11606 - regs->gs = __KERNEL_STACK_CANARY;
11607 + savesegment(gs, regs->gs);
11608
11609 return regs;
11610 }
11611 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11612 int i;
11613
11614 cpu = stack_smp_processor_id();
11615 - t = &per_cpu(init_tss, cpu);
11616 + t = init_tss + cpu;
11617 orig_ist = &per_cpu(orig_ist, cpu);
11618
11619 #ifdef CONFIG_NUMA
11620 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11621 switch_to_new_gdt(cpu);
11622 loadsegment(fs, 0);
11623
11624 - load_idt((const struct desc_ptr *)&idt_descr);
11625 + load_idt(&idt_descr);
11626
11627 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11628 syscall_init();
11629 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11630 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11631 barrier();
11632
11633 - check_efer();
11634 if (cpu != 0)
11635 enable_x2apic();
11636
11637 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11638 {
11639 int cpu = smp_processor_id();
11640 struct task_struct *curr = current;
11641 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11642 + struct tss_struct *t = init_tss + cpu;
11643 struct thread_struct *thread = &curr->thread;
11644
11645 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11646 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel.c linux-2.6.32.41/arch/x86/kernel/cpu/intel.c
11647 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11648 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11649 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11650 * Update the IDT descriptor and reload the IDT so that
11651 * it uses the read-only mapped virtual address.
11652 */
11653 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11654 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11655 load_idt(&idt_descr);
11656 }
11657 #endif
11658 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c
11659 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11660 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11661 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11662 return ret;
11663 }
11664
11665 -static struct sysfs_ops sysfs_ops = {
11666 +static const struct sysfs_ops sysfs_ops = {
11667 .show = show,
11668 .store = store,
11669 };
11670 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/Makefile linux-2.6.32.41/arch/x86/kernel/cpu/Makefile
11671 --- linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11672 +++ linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11673 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11674 CFLAGS_REMOVE_common.o = -pg
11675 endif
11676
11677 -# Make sure load_percpu_segment has no stackprotector
11678 -nostackp := $(call cc-option, -fno-stack-protector)
11679 -CFLAGS_common.o := $(nostackp)
11680 -
11681 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11682 obj-y += proc.o capflags.o powerflags.o common.o
11683 obj-y += vmware.o hypervisor.o sched.o
11684 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c
11685 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11686 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11687 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11688 return ret;
11689 }
11690
11691 -static struct sysfs_ops threshold_ops = {
11692 +static const struct sysfs_ops threshold_ops = {
11693 .show = show,
11694 .store = store,
11695 };
11696 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c
11697 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11698 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11699 @@ -43,6 +43,7 @@
11700 #include <asm/ipi.h>
11701 #include <asm/mce.h>
11702 #include <asm/msr.h>
11703 +#include <asm/local.h>
11704
11705 #include "mce-internal.h"
11706
11707 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11708 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11709 m->cs, m->ip);
11710
11711 - if (m->cs == __KERNEL_CS)
11712 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11713 print_symbol("{%s}", m->ip);
11714 pr_cont("\n");
11715 }
11716 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
11717
11718 #define PANIC_TIMEOUT 5 /* 5 seconds */
11719
11720 -static atomic_t mce_paniced;
11721 +static atomic_unchecked_t mce_paniced;
11722
11723 static int fake_panic;
11724 -static atomic_t mce_fake_paniced;
11725 +static atomic_unchecked_t mce_fake_paniced;
11726
11727 /* Panic in progress. Enable interrupts and wait for final IPI */
11728 static void wait_for_panic(void)
11729 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11730 /*
11731 * Make sure only one CPU runs in machine check panic
11732 */
11733 - if (atomic_inc_return(&mce_paniced) > 1)
11734 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11735 wait_for_panic();
11736 barrier();
11737
11738 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11739 console_verbose();
11740 } else {
11741 /* Don't log too much for fake panic */
11742 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11743 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11744 return;
11745 }
11746 print_mce_head();
11747 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11748 * might have been modified by someone else.
11749 */
11750 rmb();
11751 - if (atomic_read(&mce_paniced))
11752 + if (atomic_read_unchecked(&mce_paniced))
11753 wait_for_panic();
11754 if (!monarch_timeout)
11755 goto out;
11756 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11757 */
11758
11759 static DEFINE_SPINLOCK(mce_state_lock);
11760 -static int open_count; /* #times opened */
11761 +static local_t open_count; /* #times opened */
11762 static int open_exclu; /* already open exclusive? */
11763
11764 static int mce_open(struct inode *inode, struct file *file)
11765 {
11766 spin_lock(&mce_state_lock);
11767
11768 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11769 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11770 spin_unlock(&mce_state_lock);
11771
11772 return -EBUSY;
11773 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11774
11775 if (file->f_flags & O_EXCL)
11776 open_exclu = 1;
11777 - open_count++;
11778 + local_inc(&open_count);
11779
11780 spin_unlock(&mce_state_lock);
11781
11782 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11783 {
11784 spin_lock(&mce_state_lock);
11785
11786 - open_count--;
11787 + local_dec(&open_count);
11788 open_exclu = 0;
11789
11790 spin_unlock(&mce_state_lock);
11791 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11792 static void mce_reset(void)
11793 {
11794 cpu_missing = 0;
11795 - atomic_set(&mce_fake_paniced, 0);
11796 + atomic_set_unchecked(&mce_fake_paniced, 0);
11797 atomic_set(&mce_executing, 0);
11798 atomic_set(&mce_callin, 0);
11799 atomic_set(&global_nwo, 0);
11800 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c
11801 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11802 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11803 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11804 return 0;
11805 }
11806
11807 -static struct mtrr_ops amd_mtrr_ops = {
11808 +static const struct mtrr_ops amd_mtrr_ops = {
11809 .vendor = X86_VENDOR_AMD,
11810 .set = amd_set_mtrr,
11811 .get = amd_get_mtrr,
11812 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c
11813 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11814 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11815 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11816 return 0;
11817 }
11818
11819 -static struct mtrr_ops centaur_mtrr_ops = {
11820 +static const struct mtrr_ops centaur_mtrr_ops = {
11821 .vendor = X86_VENDOR_CENTAUR,
11822 .set = centaur_set_mcr,
11823 .get = centaur_get_mcr,
11824 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c
11825 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11826 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11827 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11828 post_set();
11829 }
11830
11831 -static struct mtrr_ops cyrix_mtrr_ops = {
11832 +static const struct mtrr_ops cyrix_mtrr_ops = {
11833 .vendor = X86_VENDOR_CYRIX,
11834 .set_all = cyrix_set_all,
11835 .set = cyrix_set_arr,
11836 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c
11837 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11838 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11839 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11840 /*
11841 * Generic structure...
11842 */
11843 -struct mtrr_ops generic_mtrr_ops = {
11844 +const struct mtrr_ops generic_mtrr_ops = {
11845 .use_intel_if = 1,
11846 .set_all = generic_set_all,
11847 .get = generic_get_mtrr,
11848 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c
11849 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11850 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11851 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11852 u64 size_or_mask, size_and_mask;
11853 static bool mtrr_aps_delayed_init;
11854
11855 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11856 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11857
11858 -struct mtrr_ops *mtrr_if;
11859 +const struct mtrr_ops *mtrr_if;
11860
11861 static void set_mtrr(unsigned int reg, unsigned long base,
11862 unsigned long size, mtrr_type type);
11863
11864 -void set_mtrr_ops(struct mtrr_ops *ops)
11865 +void set_mtrr_ops(const struct mtrr_ops *ops)
11866 {
11867 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
11868 mtrr_ops[ops->vendor] = ops;
11869 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h
11870 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
11871 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
11872 @@ -12,19 +12,19 @@
11873 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
11874
11875 struct mtrr_ops {
11876 - u32 vendor;
11877 - u32 use_intel_if;
11878 - void (*set)(unsigned int reg, unsigned long base,
11879 + const u32 vendor;
11880 + const u32 use_intel_if;
11881 + void (* const set)(unsigned int reg, unsigned long base,
11882 unsigned long size, mtrr_type type);
11883 - void (*set_all)(void);
11884 + void (* const set_all)(void);
11885
11886 - void (*get)(unsigned int reg, unsigned long *base,
11887 + void (* const get)(unsigned int reg, unsigned long *base,
11888 unsigned long *size, mtrr_type *type);
11889 - int (*get_free_region)(unsigned long base, unsigned long size,
11890 + int (* const get_free_region)(unsigned long base, unsigned long size,
11891 int replace_reg);
11892 - int (*validate_add_page)(unsigned long base, unsigned long size,
11893 + int (* const validate_add_page)(unsigned long base, unsigned long size,
11894 unsigned int type);
11895 - int (*have_wrcomb)(void);
11896 + int (* const have_wrcomb)(void);
11897 };
11898
11899 extern int generic_get_free_region(unsigned long base, unsigned long size,
11900 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
11901 extern int generic_validate_add_page(unsigned long base, unsigned long size,
11902 unsigned int type);
11903
11904 -extern struct mtrr_ops generic_mtrr_ops;
11905 +extern const struct mtrr_ops generic_mtrr_ops;
11906
11907 extern int positive_have_wrcomb(void);
11908
11909 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
11910 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
11911 void get_mtrr_state(void);
11912
11913 -extern void set_mtrr_ops(struct mtrr_ops *ops);
11914 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
11915
11916 extern u64 size_or_mask, size_and_mask;
11917 -extern struct mtrr_ops *mtrr_if;
11918 +extern const struct mtrr_ops *mtrr_if;
11919
11920 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
11921 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
11922 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c
11923 --- linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
11924 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
11925 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
11926
11927 /* Interface defining a CPU specific perfctr watchdog */
11928 struct wd_ops {
11929 - int (*reserve)(void);
11930 - void (*unreserve)(void);
11931 - int (*setup)(unsigned nmi_hz);
11932 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11933 - void (*stop)(void);
11934 + int (* const reserve)(void);
11935 + void (* const unreserve)(void);
11936 + int (* const setup)(unsigned nmi_hz);
11937 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11938 + void (* const stop)(void);
11939 unsigned perfctr;
11940 unsigned evntsel;
11941 u64 checkbit;
11942 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
11943 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
11944 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
11945
11946 +/* cannot be const */
11947 static struct wd_ops intel_arch_wd_ops;
11948
11949 static int setup_intel_arch_watchdog(unsigned nmi_hz)
11950 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
11951 return 1;
11952 }
11953
11954 +/* cannot be const */
11955 static struct wd_ops intel_arch_wd_ops __read_mostly = {
11956 .reserve = single_msr_reserve,
11957 .unreserve = single_msr_unreserve,
11958 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c
11959 --- linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
11960 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
11961 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
11962 * count to the generic event atomically:
11963 */
11964 again:
11965 - prev_raw_count = atomic64_read(&hwc->prev_count);
11966 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
11967 rdmsrl(hwc->event_base + idx, new_raw_count);
11968
11969 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
11970 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
11971 new_raw_count) != prev_raw_count)
11972 goto again;
11973
11974 @@ -741,7 +741,7 @@ again:
11975 delta = (new_raw_count << shift) - (prev_raw_count << shift);
11976 delta >>= shift;
11977
11978 - atomic64_add(delta, &event->count);
11979 + atomic64_add_unchecked(delta, &event->count);
11980 atomic64_sub(delta, &hwc->period_left);
11981
11982 return new_raw_count;
11983 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
11984 * The hw event starts counting from this event offset,
11985 * mark it to be able to extra future deltas:
11986 */
11987 - atomic64_set(&hwc->prev_count, (u64)-left);
11988 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
11989
11990 err = checking_wrmsrl(hwc->event_base + idx,
11991 (u64)(-left) & x86_pmu.event_mask);
11992 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
11993 break;
11994
11995 callchain_store(entry, frame.return_address);
11996 - fp = frame.next_frame;
11997 + fp = (__force const void __user *)frame.next_frame;
11998 }
11999 }
12000
12001 diff -urNp linux-2.6.32.41/arch/x86/kernel/crash.c linux-2.6.32.41/arch/x86/kernel/crash.c
12002 --- linux-2.6.32.41/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12003 +++ linux-2.6.32.41/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12004 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12005 regs = args->regs;
12006
12007 #ifdef CONFIG_X86_32
12008 - if (!user_mode_vm(regs)) {
12009 + if (!user_mode(regs)) {
12010 crash_fixup_ss_esp(&fixed_regs, regs);
12011 regs = &fixed_regs;
12012 }
12013 diff -urNp linux-2.6.32.41/arch/x86/kernel/doublefault_32.c linux-2.6.32.41/arch/x86/kernel/doublefault_32.c
12014 --- linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12015 +++ linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12016 @@ -11,7 +11,7 @@
12017
12018 #define DOUBLEFAULT_STACKSIZE (1024)
12019 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12020 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12021 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12022
12023 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12024
12025 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12026 unsigned long gdt, tss;
12027
12028 store_gdt(&gdt_desc);
12029 - gdt = gdt_desc.address;
12030 + gdt = (unsigned long)gdt_desc.address;
12031
12032 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12033
12034 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12035 /* 0x2 bit is always set */
12036 .flags = X86_EFLAGS_SF | 0x2,
12037 .sp = STACK_START,
12038 - .es = __USER_DS,
12039 + .es = __KERNEL_DS,
12040 .cs = __KERNEL_CS,
12041 .ss = __KERNEL_DS,
12042 - .ds = __USER_DS,
12043 + .ds = __KERNEL_DS,
12044 .fs = __KERNEL_PERCPU,
12045
12046 .__cr3 = __pa_nodebug(swapper_pg_dir),
12047 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c
12048 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12049 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12050 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12051 #endif
12052
12053 for (;;) {
12054 - struct thread_info *context;
12055 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12056 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12057
12058 - context = (struct thread_info *)
12059 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12060 - bp = print_context_stack(context, stack, bp, ops,
12061 - data, NULL, &graph);
12062 -
12063 - stack = (unsigned long *)context->previous_esp;
12064 - if (!stack)
12065 + if (stack_start == task_stack_page(task))
12066 break;
12067 + stack = *(unsigned long **)stack_start;
12068 if (ops->stack(data, "IRQ") < 0)
12069 break;
12070 touch_nmi_watchdog();
12071 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12072 * When in-kernel, we also print out the stack and code at the
12073 * time of the fault..
12074 */
12075 - if (!user_mode_vm(regs)) {
12076 + if (!user_mode(regs)) {
12077 unsigned int code_prologue = code_bytes * 43 / 64;
12078 unsigned int code_len = code_bytes;
12079 unsigned char c;
12080 u8 *ip;
12081 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12082
12083 printk(KERN_EMERG "Stack:\n");
12084 show_stack_log_lvl(NULL, regs, &regs->sp,
12085 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12086
12087 printk(KERN_EMERG "Code: ");
12088
12089 - ip = (u8 *)regs->ip - code_prologue;
12090 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12091 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12092 /* try starting at IP */
12093 - ip = (u8 *)regs->ip;
12094 + ip = (u8 *)regs->ip + cs_base;
12095 code_len = code_len - code_prologue + 1;
12096 }
12097 for (i = 0; i < code_len; i++, ip++) {
12098 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12099 printk(" Bad EIP value.");
12100 break;
12101 }
12102 - if (ip == (u8 *)regs->ip)
12103 + if (ip == (u8 *)regs->ip + cs_base)
12104 printk("<%02x> ", c);
12105 else
12106 printk("%02x ", c);
12107 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12108 {
12109 unsigned short ud2;
12110
12111 + ip = ktla_ktva(ip);
12112 if (ip < PAGE_OFFSET)
12113 return 0;
12114 if (probe_kernel_address((unsigned short *)ip, ud2))
12115 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c
12116 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12117 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12118 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12119 unsigned long *irq_stack_end =
12120 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12121 unsigned used = 0;
12122 - struct thread_info *tinfo;
12123 int graph = 0;
12124 + void *stack_start;
12125
12126 if (!task)
12127 task = current;
12128 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12129 * current stack address. If the stacks consist of nested
12130 * exceptions
12131 */
12132 - tinfo = task_thread_info(task);
12133 for (;;) {
12134 char *id;
12135 unsigned long *estack_end;
12136 +
12137 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12138 &used, &id);
12139
12140 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12141 if (ops->stack(data, id) < 0)
12142 break;
12143
12144 - bp = print_context_stack(tinfo, stack, bp, ops,
12145 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12146 data, estack_end, &graph);
12147 ops->stack(data, "<EOE>");
12148 /*
12149 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12150 if (stack >= irq_stack && stack < irq_stack_end) {
12151 if (ops->stack(data, "IRQ") < 0)
12152 break;
12153 - bp = print_context_stack(tinfo, stack, bp,
12154 + bp = print_context_stack(task, irq_stack, stack, bp,
12155 ops, data, irq_stack_end, &graph);
12156 /*
12157 * We link to the next stack (which would be
12158 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12159 /*
12160 * This handles the process stack:
12161 */
12162 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12163 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12164 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12165 put_cpu();
12166 }
12167 EXPORT_SYMBOL(dump_trace);
12168 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.c linux-2.6.32.41/arch/x86/kernel/dumpstack.c
12169 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12170 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12171 @@ -2,6 +2,9 @@
12172 * Copyright (C) 1991, 1992 Linus Torvalds
12173 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12174 */
12175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12176 +#define __INCLUDED_BY_HIDESYM 1
12177 +#endif
12178 #include <linux/kallsyms.h>
12179 #include <linux/kprobes.h>
12180 #include <linux/uaccess.h>
12181 @@ -28,7 +31,7 @@ static int die_counter;
12182
12183 void printk_address(unsigned long address, int reliable)
12184 {
12185 - printk(" [<%p>] %s%pS\n", (void *) address,
12186 + printk(" [<%p>] %s%pA\n", (void *) address,
12187 reliable ? "" : "? ", (void *) address);
12188 }
12189
12190 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12191 static void
12192 print_ftrace_graph_addr(unsigned long addr, void *data,
12193 const struct stacktrace_ops *ops,
12194 - struct thread_info *tinfo, int *graph)
12195 + struct task_struct *task, int *graph)
12196 {
12197 - struct task_struct *task = tinfo->task;
12198 unsigned long ret_addr;
12199 int index = task->curr_ret_stack;
12200
12201 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12202 static inline void
12203 print_ftrace_graph_addr(unsigned long addr, void *data,
12204 const struct stacktrace_ops *ops,
12205 - struct thread_info *tinfo, int *graph)
12206 + struct task_struct *task, int *graph)
12207 { }
12208 #endif
12209
12210 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12211 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12212 */
12213
12214 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12215 - void *p, unsigned int size, void *end)
12216 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12217 {
12218 - void *t = tinfo;
12219 if (end) {
12220 if (p < end && p >= (end-THREAD_SIZE))
12221 return 1;
12222 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12223 }
12224
12225 unsigned long
12226 -print_context_stack(struct thread_info *tinfo,
12227 +print_context_stack(struct task_struct *task, void *stack_start,
12228 unsigned long *stack, unsigned long bp,
12229 const struct stacktrace_ops *ops, void *data,
12230 unsigned long *end, int *graph)
12231 {
12232 struct stack_frame *frame = (struct stack_frame *)bp;
12233
12234 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12235 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12236 unsigned long addr;
12237
12238 addr = *stack;
12239 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12240 } else {
12241 ops->address(data, addr, 0);
12242 }
12243 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12244 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12245 }
12246 stack++;
12247 }
12248 @@ -180,7 +180,7 @@ void dump_stack(void)
12249 #endif
12250
12251 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12252 - current->pid, current->comm, print_tainted(),
12253 + task_pid_nr(current), current->comm, print_tainted(),
12254 init_utsname()->release,
12255 (int)strcspn(init_utsname()->version, " "),
12256 init_utsname()->version);
12257 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12258 return flags;
12259 }
12260
12261 +extern void gr_handle_kernel_exploit(void);
12262 +
12263 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12264 {
12265 if (regs && kexec_should_crash(current))
12266 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12267 panic("Fatal exception in interrupt");
12268 if (panic_on_oops)
12269 panic("Fatal exception");
12270 - do_exit(signr);
12271 +
12272 + gr_handle_kernel_exploit();
12273 +
12274 + do_group_exit(signr);
12275 }
12276
12277 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12278 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12279 unsigned long flags = oops_begin();
12280 int sig = SIGSEGV;
12281
12282 - if (!user_mode_vm(regs))
12283 + if (!user_mode(regs))
12284 report_bug(regs->ip, regs);
12285
12286 if (__die(str, regs, err))
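The oops path above now calls gr_handle_kernel_exploit() and then do_group_exit() instead of do_exit(), so an oopsing thread takes its whole thread group with it rather than leaving siblings running on possibly corrupted state. A rough user-space analogy of that distinction, using POSIX threads (build with -pthread; the sleep intervals are arbitrary):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* A sibling thread that keeps running so we can see whether it survives. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		puts("worker still alive");
		sleep(1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(2);

	/* pthread_exit(NULL) here would end only this thread and the worker
	 * would keep printing; exit() tears down the whole thread group,
	 * which is the do_group_exit() behaviour the hunk switches to. */
	exit(EXIT_FAILURE);
}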
12287 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.h linux-2.6.32.41/arch/x86/kernel/dumpstack.h
12288 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12289 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12290 @@ -15,7 +15,7 @@
12291 #endif
12292
12293 extern unsigned long
12294 -print_context_stack(struct thread_info *tinfo,
12295 +print_context_stack(struct task_struct *task, void *stack_start,
12296 unsigned long *stack, unsigned long bp,
12297 const struct stacktrace_ops *ops, void *data,
12298 unsigned long *end, int *graph);
12299 diff -urNp linux-2.6.32.41/arch/x86/kernel/e820.c linux-2.6.32.41/arch/x86/kernel/e820.c
12300 --- linux-2.6.32.41/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12301 +++ linux-2.6.32.41/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12302 @@ -733,7 +733,7 @@ struct early_res {
12303 };
12304 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12305 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12306 - {}
12307 + { 0, 0, {0}, 0 }
12308 };
12309
12310 static int __init find_overlapped_early(u64 start, u64 end)
12311 diff -urNp linux-2.6.32.41/arch/x86/kernel/early_printk.c linux-2.6.32.41/arch/x86/kernel/early_printk.c
12312 --- linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12313 +++ linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12314 @@ -7,6 +7,7 @@
12315 #include <linux/pci_regs.h>
12316 #include <linux/pci_ids.h>
12317 #include <linux/errno.h>
12318 +#include <linux/sched.h>
12319 #include <asm/io.h>
12320 #include <asm/processor.h>
12321 #include <asm/fcntl.h>
12322 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12323 int n;
12324 va_list ap;
12325
12326 + pax_track_stack();
12327 +
12328 va_start(ap, fmt);
12329 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12330 early_console->write(early_console, buf, n);
12331 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_32.c linux-2.6.32.41/arch/x86/kernel/efi_32.c
12332 --- linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12333 +++ linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12334 @@ -38,70 +38,38 @@
12335 */
12336
12337 static unsigned long efi_rt_eflags;
12338 -static pgd_t efi_bak_pg_dir_pointer[2];
12339 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12340
12341 -void efi_call_phys_prelog(void)
12342 +void __init efi_call_phys_prelog(void)
12343 {
12344 - unsigned long cr4;
12345 - unsigned long temp;
12346 struct desc_ptr gdt_descr;
12347
12348 local_irq_save(efi_rt_eflags);
12349
12350 - /*
12351 - * If I don't have PAE, I should just duplicate two entries in page
12352 - * directory. If I have PAE, I just need to duplicate one entry in
12353 - * page directory.
12354 - */
12355 - cr4 = read_cr4_safe();
12356
12357 - if (cr4 & X86_CR4_PAE) {
12358 - efi_bak_pg_dir_pointer[0].pgd =
12359 - swapper_pg_dir[pgd_index(0)].pgd;
12360 - swapper_pg_dir[0].pgd =
12361 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12362 - } else {
12363 - efi_bak_pg_dir_pointer[0].pgd =
12364 - swapper_pg_dir[pgd_index(0)].pgd;
12365 - efi_bak_pg_dir_pointer[1].pgd =
12366 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12367 - swapper_pg_dir[pgd_index(0)].pgd =
12368 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12369 - temp = PAGE_OFFSET + 0x400000;
12370 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12371 - swapper_pg_dir[pgd_index(temp)].pgd;
12372 - }
12373 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12374 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12375 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12376
12377 /*
12378 * After the lock is released, the original page table is restored.
12379 */
12380 __flush_tlb_all();
12381
12382 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12383 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12384 gdt_descr.size = GDT_SIZE - 1;
12385 load_gdt(&gdt_descr);
12386 }
12387
12388 -void efi_call_phys_epilog(void)
12389 +void __init efi_call_phys_epilog(void)
12390 {
12391 - unsigned long cr4;
12392 struct desc_ptr gdt_descr;
12393
12394 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12395 + gdt_descr.address = get_cpu_gdt_table(0);
12396 gdt_descr.size = GDT_SIZE - 1;
12397 load_gdt(&gdt_descr);
12398
12399 - cr4 = read_cr4_safe();
12400 -
12401 - if (cr4 & X86_CR4_PAE) {
12402 - swapper_pg_dir[pgd_index(0)].pgd =
12403 - efi_bak_pg_dir_pointer[0].pgd;
12404 - } else {
12405 - swapper_pg_dir[pgd_index(0)].pgd =
12406 - efi_bak_pg_dir_pointer[0].pgd;
12407 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12408 - efi_bak_pg_dir_pointer[1].pgd;
12409 - }
12410 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12411
12412 /*
12413 * After the lock is released, the original page table is restored.
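The efi_32.c rewrite above drops the PAE and non-PAE special cases: the prelog saves the low page-directory slots wholesale, mirrors the kernel half into them so the flat physical mappings exist for the EFI call, and the epilog copies the backup straight back. A toy user-space sketch of that save/mirror/restore sequence (entry counts and entry values are made up):

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD        8   /* toy size; the real table has 1024 entries */
#define KERNEL_PGD_BOUNDARY 4   /* toy split between the user and kernel halves */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_t;

/* clone_pgd_range() amounts to a memcpy of page-directory entries. */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, unsigned int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	pgd_t swapper_pg_dir[PTRS_PER_PGD] = { 0, 0, 0, 0, 0xa1, 0xa2, 0xa3, 0xa4 };
	pgd_t backup[KERNEL_PGD_PTRS];
	unsigned int n = KERNEL_PGD_PTRS < KERNEL_PGD_BOUNDARY ?
			 KERNEL_PGD_PTRS : KERNEL_PGD_BOUNDARY;   /* the min_t() in the patch */

	/* prelog: back up the low entries, then mirror the kernel mappings
	 * into them so the same code is reachable at its identity address */
	clone_pgd_range(backup, swapper_pg_dir, KERNEL_PGD_PTRS);
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, n);

	/* ... the physical-mode EFI call would run here ... */

	/* epilog: put the saved low entries back */
	clone_pgd_range(swapper_pg_dir, backup, KERNEL_PGD_PTRS);

	for (int i = 0; i < PTRS_PER_PGD; i++)
		printf("pgd[%d]=%#lx\n", i, swapper_pg_dir[i]);
	return 0;
}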
12414 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S
12415 --- linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12416 +++ linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12417 @@ -6,6 +6,7 @@
12418 */
12419
12420 #include <linux/linkage.h>
12421 +#include <linux/init.h>
12422 #include <asm/page_types.h>
12423
12424 /*
12425 @@ -20,7 +21,7 @@
12426 * service functions will comply with gcc calling convention, too.
12427 */
12428
12429 -.text
12430 +__INIT
12431 ENTRY(efi_call_phys)
12432 /*
12433 * 0. The function can only be called in Linux kernel. So CS has been
12434 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12435 * The mapping of lower virtual memory has been created in prelog and
12436 * epilog.
12437 */
12438 - movl $1f, %edx
12439 - subl $__PAGE_OFFSET, %edx
12440 - jmp *%edx
12441 + jmp 1f-__PAGE_OFFSET
12442 1:
12443
12444 /*
12445 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12446 * parameter 2, ..., param n. To make things easy, we save the return
12447 * address of efi_call_phys in a global variable.
12448 */
12449 - popl %edx
12450 - movl %edx, saved_return_addr
12451 - /* get the function pointer into ECX*/
12452 - popl %ecx
12453 - movl %ecx, efi_rt_function_ptr
12454 - movl $2f, %edx
12455 - subl $__PAGE_OFFSET, %edx
12456 - pushl %edx
12457 + popl (saved_return_addr)
12458 + popl (efi_rt_function_ptr)
12459
12460 /*
12461 * 3. Clear PG bit in %CR0.
12462 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12463 /*
12464 * 5. Call the physical function.
12465 */
12466 - jmp *%ecx
12467 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12468
12469 -2:
12470 /*
12471 * 6. After EFI runtime service returns, control will return to
12472 * following instruction. We'd better readjust stack pointer first.
12473 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12474 movl %cr0, %edx
12475 orl $0x80000000, %edx
12476 movl %edx, %cr0
12477 - jmp 1f
12478 -1:
12479 +
12480 /*
12481 * 8. Now restore the virtual mode from flat mode by
12482 * adding EIP with PAGE_OFFSET.
12483 */
12484 - movl $1f, %edx
12485 - jmp *%edx
12486 + jmp 1f+__PAGE_OFFSET
12487 1:
12488
12489 /*
12490 * 9. Balance the stack. And because EAX contain the return value,
12491 * we'd better not clobber it.
12492 */
12493 - leal efi_rt_function_ptr, %edx
12494 - movl (%edx), %ecx
12495 - pushl %ecx
12496 + pushl (efi_rt_function_ptr)
12497
12498 /*
12499 - * 10. Push the saved return address onto the stack and return.
12500 + * 10. Return to the saved return address.
12501 */
12502 - leal saved_return_addr, %edx
12503 - movl (%edx), %ecx
12504 - pushl %ecx
12505 - ret
12506 + jmpl *(saved_return_addr)
12507 ENDPROC(efi_call_phys)
12508 .previous
12509
12510 -.data
12511 +__INITDATA
12512 saved_return_addr:
12513 .long 0
12514 efi_rt_function_ptr:
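The rewritten stub above keeps switching between a label's virtual address and its physical alias by subtracting or adding __PAGE_OFFSET (the jmp 1f-__PAGE_OFFSET and jmp 1f+__PAGE_OFFSET pairs), which works because the kernel's linear mapping sits at a fixed offset. A short sketch of that arithmetic, with an illustrative offset:

#include <stdio.h>

#define PAGE_OFFSET 0xc0000000UL   /* illustrative 3G/1G split on 32-bit x86 */

/* In the linear mapping a virtual address and its physical alias differ by
 * a constant, so the conversion is plain subtraction/addition. */
static unsigned long virt_to_phys_lin(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

static unsigned long phys_to_virt_lin(unsigned long paddr)
{
	return paddr + PAGE_OFFSET;
}

int main(void)
{
	unsigned long label = 0xc0123456UL;   /* pretend address of the "1:" label */

	printf("virtual   %#lx\n", label);
	printf("physical  %#lx\n", virt_to_phys_lin(label));
	printf("and back  %#lx\n", phys_to_virt_lin(virt_to_phys_lin(label)));
	return 0;
}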
12515 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_32.S linux-2.6.32.41/arch/x86/kernel/entry_32.S
12516 --- linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12517 +++ linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12518 @@ -185,13 +185,146 @@
12519 /*CFI_REL_OFFSET gs, PT_GS*/
12520 .endm
12521 .macro SET_KERNEL_GS reg
12522 +
12523 +#ifdef CONFIG_CC_STACKPROTECTOR
12524 movl $(__KERNEL_STACK_CANARY), \reg
12525 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12526 + movl $(__USER_DS), \reg
12527 +#else
12528 + xorl \reg, \reg
12529 +#endif
12530 +
12531 movl \reg, %gs
12532 .endm
12533
12534 #endif /* CONFIG_X86_32_LAZY_GS */
12535
12536 -.macro SAVE_ALL
12537 +.macro pax_enter_kernel
12538 +#ifdef CONFIG_PAX_KERNEXEC
12539 + call pax_enter_kernel
12540 +#endif
12541 +.endm
12542 +
12543 +.macro pax_exit_kernel
12544 +#ifdef CONFIG_PAX_KERNEXEC
12545 + call pax_exit_kernel
12546 +#endif
12547 +.endm
12548 +
12549 +#ifdef CONFIG_PAX_KERNEXEC
12550 +ENTRY(pax_enter_kernel)
12551 +#ifdef CONFIG_PARAVIRT
12552 + pushl %eax
12553 + pushl %ecx
12554 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12555 + mov %eax, %esi
12556 +#else
12557 + mov %cr0, %esi
12558 +#endif
12559 + bts $16, %esi
12560 + jnc 1f
12561 + mov %cs, %esi
12562 + cmp $__KERNEL_CS, %esi
12563 + jz 3f
12564 + ljmp $__KERNEL_CS, $3f
12565 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12566 +2:
12567 +#ifdef CONFIG_PARAVIRT
12568 + mov %esi, %eax
12569 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12570 +#else
12571 + mov %esi, %cr0
12572 +#endif
12573 +3:
12574 +#ifdef CONFIG_PARAVIRT
12575 + popl %ecx
12576 + popl %eax
12577 +#endif
12578 + ret
12579 +ENDPROC(pax_enter_kernel)
12580 +
12581 +ENTRY(pax_exit_kernel)
12582 +#ifdef CONFIG_PARAVIRT
12583 + pushl %eax
12584 + pushl %ecx
12585 +#endif
12586 + mov %cs, %esi
12587 + cmp $__KERNEXEC_KERNEL_CS, %esi
12588 + jnz 2f
12589 +#ifdef CONFIG_PARAVIRT
12590 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12591 + mov %eax, %esi
12592 +#else
12593 + mov %cr0, %esi
12594 +#endif
12595 + btr $16, %esi
12596 + ljmp $__KERNEL_CS, $1f
12597 +1:
12598 +#ifdef CONFIG_PARAVIRT
12599 + mov %esi, %eax
12600 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12601 +#else
12602 + mov %esi, %cr0
12603 +#endif
12604 +2:
12605 +#ifdef CONFIG_PARAVIRT
12606 + popl %ecx
12607 + popl %eax
12608 +#endif
12609 + ret
12610 +ENDPROC(pax_exit_kernel)
12611 +#endif
12612 +
12613 +.macro pax_erase_kstack
12614 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12615 + call pax_erase_kstack
12616 +#endif
12617 +.endm
12618 +
12619 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12620 +/*
12621 + * ebp: thread_info
12622 + * ecx, edx: can be clobbered
12623 + */
12624 +ENTRY(pax_erase_kstack)
12625 + pushl %edi
12626 + pushl %eax
12627 +
12628 + mov TI_lowest_stack(%ebp), %edi
12629 + mov $-0xBEEF, %eax
12630 + std
12631 +
12632 +1: mov %edi, %ecx
12633 + and $THREAD_SIZE_asm - 1, %ecx
12634 + shr $2, %ecx
12635 + repne scasl
12636 + jecxz 2f
12637 +
12638 + cmp $2*16, %ecx
12639 + jc 2f
12640 +
12641 + mov $2*16, %ecx
12642 + repe scasl
12643 + jecxz 2f
12644 + jne 1b
12645 +
12646 +2: cld
12647 + mov %esp, %ecx
12648 + sub %edi, %ecx
12649 + shr $2, %ecx
12650 + rep stosl
12651 +
12652 + mov TI_task_thread_sp0(%ebp), %edi
12653 + sub $128, %edi
12654 + mov %edi, TI_lowest_stack(%ebp)
12655 +
12656 + popl %eax
12657 + popl %edi
12658 + ret
12659 +ENDPROC(pax_erase_kstack)
12660 +#endif
12661 +
12662 +.macro __SAVE_ALL _DS
12663 cld
12664 PUSH_GS
12665 pushl %fs
12666 @@ -224,7 +357,7 @@
12667 pushl %ebx
12668 CFI_ADJUST_CFA_OFFSET 4
12669 CFI_REL_OFFSET ebx, 0
12670 - movl $(__USER_DS), %edx
12671 + movl $\_DS, %edx
12672 movl %edx, %ds
12673 movl %edx, %es
12674 movl $(__KERNEL_PERCPU), %edx
12675 @@ -232,6 +365,15 @@
12676 SET_KERNEL_GS %edx
12677 .endm
12678
12679 +.macro SAVE_ALL
12680 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12681 + __SAVE_ALL __KERNEL_DS
12682 + pax_enter_kernel
12683 +#else
12684 + __SAVE_ALL __USER_DS
12685 +#endif
12686 +.endm
12687 +
12688 .macro RESTORE_INT_REGS
12689 popl %ebx
12690 CFI_ADJUST_CFA_OFFSET -4
12691 @@ -352,7 +494,15 @@ check_userspace:
12692 movb PT_CS(%esp), %al
12693 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12694 cmpl $USER_RPL, %eax
12695 +
12696 +#ifdef CONFIG_PAX_KERNEXEC
12697 + jae resume_userspace
12698 +
12699 + PAX_EXIT_KERNEL
12700 + jmp resume_kernel
12701 +#else
12702 jb resume_kernel # not returning to v8086 or userspace
12703 +#endif
12704
12705 ENTRY(resume_userspace)
12706 LOCKDEP_SYS_EXIT
12707 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12708 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12709 # int/exception return?
12710 jne work_pending
12711 - jmp restore_all
12712 + jmp restore_all_pax
12713 END(ret_from_exception)
12714
12715 #ifdef CONFIG_PREEMPT
12716 @@ -414,25 +564,36 @@ sysenter_past_esp:
12717 /*CFI_REL_OFFSET cs, 0*/
12718 /*
12719 * Push current_thread_info()->sysenter_return to the stack.
12720 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12721 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12722 */
12723 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12724 + pushl $0
12725 CFI_ADJUST_CFA_OFFSET 4
12726 CFI_REL_OFFSET eip, 0
12727
12728 pushl %eax
12729 CFI_ADJUST_CFA_OFFSET 4
12730 SAVE_ALL
12731 + GET_THREAD_INFO(%ebp)
12732 + movl TI_sysenter_return(%ebp),%ebp
12733 + movl %ebp,PT_EIP(%esp)
12734 ENABLE_INTERRUPTS(CLBR_NONE)
12735
12736 /*
12737 * Load the potential sixth argument from user stack.
12738 * Careful about security.
12739 */
12740 + movl PT_OLDESP(%esp),%ebp
12741 +
12742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12743 + mov PT_OLDSS(%esp),%ds
12744 +1: movl %ds:(%ebp),%ebp
12745 + push %ss
12746 + pop %ds
12747 +#else
12748 cmpl $__PAGE_OFFSET-3,%ebp
12749 jae syscall_fault
12750 1: movl (%ebp),%ebp
12751 +#endif
12752 +
12753 movl %ebp,PT_EBP(%esp)
12754 .section __ex_table,"a"
12755 .align 4
12756 @@ -455,12 +616,23 @@ sysenter_do_call:
12757 testl $_TIF_ALLWORK_MASK, %ecx
12758 jne sysexit_audit
12759 sysenter_exit:
12760 +
12761 +#ifdef CONFIG_PAX_RANDKSTACK
12762 + pushl_cfi %eax
12763 + call pax_randomize_kstack
12764 + popl_cfi %eax
12765 +#endif
12766 +
12767 + pax_erase_kstack
12768 +
12769 /* if something modifies registers it must also disable sysexit */
12770 movl PT_EIP(%esp), %edx
12771 movl PT_OLDESP(%esp), %ecx
12772 xorl %ebp,%ebp
12773 TRACE_IRQS_ON
12774 1: mov PT_FS(%esp), %fs
12775 +2: mov PT_DS(%esp), %ds
12776 +3: mov PT_ES(%esp), %es
12777 PTGS_TO_GS
12778 ENABLE_INTERRUPTS_SYSEXIT
12779
12780 @@ -477,6 +649,9 @@ sysenter_audit:
12781 movl %eax,%edx /* 2nd arg: syscall number */
12782 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12783 call audit_syscall_entry
12784 +
12785 + pax_erase_kstack
12786 +
12787 pushl %ebx
12788 CFI_ADJUST_CFA_OFFSET 4
12789 movl PT_EAX(%esp),%eax /* reload syscall number */
12790 @@ -504,11 +679,17 @@ sysexit_audit:
12791
12792 CFI_ENDPROC
12793 .pushsection .fixup,"ax"
12794 -2: movl $0,PT_FS(%esp)
12795 +4: movl $0,PT_FS(%esp)
12796 + jmp 1b
12797 +5: movl $0,PT_DS(%esp)
12798 + jmp 1b
12799 +6: movl $0,PT_ES(%esp)
12800 jmp 1b
12801 .section __ex_table,"a"
12802 .align 4
12803 - .long 1b,2b
12804 + .long 1b,4b
12805 + .long 2b,5b
12806 + .long 3b,6b
12807 .popsection
12808 PTGS_TO_GS_EX
12809 ENDPROC(ia32_sysenter_target)
12810 @@ -538,6 +719,14 @@ syscall_exit:
12811 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12812 jne syscall_exit_work
12813
12814 +restore_all_pax:
12815 +
12816 +#ifdef CONFIG_PAX_RANDKSTACK
12817 + call pax_randomize_kstack
12818 +#endif
12819 +
12820 + pax_erase_kstack
12821 +
12822 restore_all:
12823 TRACE_IRQS_IRET
12824 restore_all_notrace:
12825 @@ -602,7 +791,13 @@ ldt_ss:
12826 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12827 mov %dx, %ax /* eax: new kernel esp */
12828 sub %eax, %edx /* offset (low word is 0) */
12829 - PER_CPU(gdt_page, %ebx)
12830 +#ifdef CONFIG_SMP
12831 + movl PER_CPU_VAR(cpu_number), %ebx
12832 + shll $PAGE_SHIFT_asm, %ebx
12833 + addl $cpu_gdt_table, %ebx
12834 +#else
12835 + movl $cpu_gdt_table, %ebx
12836 +#endif
12837 shr $16, %edx
12838 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12839 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12840 @@ -636,31 +831,25 @@ work_resched:
12841 movl TI_flags(%ebp), %ecx
12842 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12843 # than syscall tracing?
12844 - jz restore_all
12845 + jz restore_all_pax
12846 testb $_TIF_NEED_RESCHED, %cl
12847 jnz work_resched
12848
12849 work_notifysig: # deal with pending signals and
12850 # notify-resume requests
12851 + movl %esp, %eax
12852 #ifdef CONFIG_VM86
12853 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12854 - movl %esp, %eax
12855 - jne work_notifysig_v86 # returning to kernel-space or
12856 + jz 1f # returning to kernel-space or
12857 # vm86-space
12858 - xorl %edx, %edx
12859 - call do_notify_resume
12860 - jmp resume_userspace_sig
12861
12862 - ALIGN
12863 -work_notifysig_v86:
12864 pushl %ecx # save ti_flags for do_notify_resume
12865 CFI_ADJUST_CFA_OFFSET 4
12866 call save_v86_state # %eax contains pt_regs pointer
12867 popl %ecx
12868 CFI_ADJUST_CFA_OFFSET -4
12869 movl %eax, %esp
12870 -#else
12871 - movl %esp, %eax
12872 +1:
12873 #endif
12874 xorl %edx, %edx
12875 call do_notify_resume
12876 @@ -673,6 +862,9 @@ syscall_trace_entry:
12877 movl $-ENOSYS,PT_EAX(%esp)
12878 movl %esp, %eax
12879 call syscall_trace_enter
12880 +
12881 + pax_erase_kstack
12882 +
12883 /* What it returned is what we'll actually use. */
12884 cmpl $(nr_syscalls), %eax
12885 jnae syscall_call
12886 @@ -695,6 +887,10 @@ END(syscall_exit_work)
12887
12888 RING0_INT_FRAME # can't unwind into user space anyway
12889 syscall_fault:
12890 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12891 + push %ss
12892 + pop %ds
12893 +#endif
12894 GET_THREAD_INFO(%ebp)
12895 movl $-EFAULT,PT_EAX(%esp)
12896 jmp resume_userspace
12897 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
12898 PTREGSCALL(vm86)
12899 PTREGSCALL(vm86old)
12900
12901 + ALIGN;
12902 +ENTRY(kernel_execve)
12903 + push %ebp
12904 + sub $PT_OLDSS+4,%esp
12905 + push %edi
12906 + push %ecx
12907 + push %eax
12908 + lea 3*4(%esp),%edi
12909 + mov $PT_OLDSS/4+1,%ecx
12910 + xorl %eax,%eax
12911 + rep stosl
12912 + pop %eax
12913 + pop %ecx
12914 + pop %edi
12915 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12916 + mov %eax,PT_EBX(%esp)
12917 + mov %edx,PT_ECX(%esp)
12918 + mov %ecx,PT_EDX(%esp)
12919 + mov %esp,%eax
12920 + call sys_execve
12921 + GET_THREAD_INFO(%ebp)
12922 + test %eax,%eax
12923 + jz syscall_exit
12924 + add $PT_OLDSS+4,%esp
12925 + pop %ebp
12926 + ret
12927 +
12928 .macro FIXUP_ESPFIX_STACK
12929 /*
12930 * Switch back for ESPFIX stack to the normal zerobased stack
12931 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
12932 * normal stack and adjusts ESP with the matching offset.
12933 */
12934 /* fixup the stack */
12935 - PER_CPU(gdt_page, %ebx)
12936 +#ifdef CONFIG_SMP
12937 + movl PER_CPU_VAR(cpu_number), %ebx
12938 + shll $PAGE_SHIFT_asm, %ebx
12939 + addl $cpu_gdt_table, %ebx
12940 +#else
12941 + movl $cpu_gdt_table, %ebx
12942 +#endif
12943 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
12944 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
12945 shl $16, %eax
12946 @@ -1198,7 +1427,6 @@ return_to_handler:
12947 ret
12948 #endif
12949
12950 -.section .rodata,"a"
12951 #include "syscall_table_32.S"
12952
12953 syscall_table_size=(.-sys_call_table)
12954 @@ -1255,9 +1483,12 @@ error_code:
12955 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12956 REG_TO_PTGS %ecx
12957 SET_KERNEL_GS %ecx
12958 - movl $(__USER_DS), %ecx
12959 + movl $(__KERNEL_DS), %ecx
12960 movl %ecx, %ds
12961 movl %ecx, %es
12962 +
12963 + pax_enter_kernel
12964 +
12965 TRACE_IRQS_OFF
12966 movl %esp,%eax # pt_regs pointer
12967 call *%edi
12968 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
12969 xorl %edx,%edx # zero error code
12970 movl %esp,%eax # pt_regs pointer
12971 call do_nmi
12972 +
12973 + pax_exit_kernel
12974 +
12975 jmp restore_all_notrace
12976 CFI_ENDPROC
12977
12978 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
12979 FIXUP_ESPFIX_STACK # %eax == %esp
12980 xorl %edx,%edx # zero error code
12981 call do_nmi
12982 +
12983 + pax_exit_kernel
12984 +
12985 RESTORE_REGS
12986 lss 12+4(%esp), %esp # back to espfix stack
12987 CFI_ADJUST_CFA_OFFSET -24
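pax_erase_kstack above (the CONFIG_PAX_MEMORY_STACKLEAK block) walks down from the recorded lowest stack point until it sees a long run of the -0xBEEF poison word, takes that as proof that everything deeper is already clean, refills the dead region up to the current stack pointer with the poison, and resets the low-water mark. A rough user-space model of the same scan-and-refill idea on a plain array (the stack size, run length and layout are illustrative):

#include <stdio.h>

#define STACK_WORDS 64                        /* toy stack size, in words */
#define POISON      ((unsigned long)-0xBEEF)  /* the poison value the patch stores */
#define RUN_LEN     8                         /* toy stand-in for the 2*16 word check */

/* stack[] models the kernel stack: low indices are deeper addresses, 'sp' is
 * the current top of the dead region, 'lowest' is the recorded low-water mark. */
static void erase_kstack(unsigned long *stack, int sp, int *lowest)
{
	int i = *lowest;

	/* scan downward until RUN_LEN consecutive poison words are found,
	 * or the bottom of the stack is reached */
	while (i > 0) {
		int run = 0;

		while (i > 0 && run < RUN_LEN && stack[i - 1] == POISON) {
			i--;
			run++;
		}
		if (run == RUN_LEN || i == 0)
			break;
		i--;	/* step past the dirty word and keep scanning */
	}

	/* re-poison everything from that point up to the current stack pointer */
	for (int j = i; j < sp; j++)
		stack[j] = POISON;

	*lowest = sp;	/* reset the mark (the patch resets it near the top via thread_sp0) */
}

int main(void)
{
	unsigned long stack[STACK_WORDS];
	int lowest = 20, sp = 40, clean = 1;

	for (int i = 0; i < STACK_WORDS; i++)
		stack[i] = POISON;
	for (int i = 20; i < 40; i++)
		stack[i] = i;		/* pretend a syscall dirtied these words */

	erase_kstack(stack, sp, &lowest);

	for (int i = 0; i < sp; i++)
		clean &= (stack[i] == POISON);
	printf("dead region fully re-poisoned: %s\n", clean ? "yes" : "no");
	return 0;
}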
12988 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_64.S linux-2.6.32.41/arch/x86/kernel/entry_64.S
12989 --- linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
12990 +++ linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
12991 @@ -53,6 +53,7 @@
12992 #include <asm/paravirt.h>
12993 #include <asm/ftrace.h>
12994 #include <asm/percpu.h>
12995 +#include <asm/pgtable.h>
12996
12997 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12998 #include <linux/elf-em.h>
12999 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13000 ENDPROC(native_usergs_sysret64)
13001 #endif /* CONFIG_PARAVIRT */
13002
13003 + .macro ljmpq sel, off
13004 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13005 + .byte 0x48; ljmp *1234f(%rip)
13006 + .pushsection .rodata
13007 + .align 16
13008 + 1234: .quad \off; .word \sel
13009 + .popsection
13010 +#else
13011 + pushq $\sel
13012 + pushq $\off
13013 + lretq
13014 +#endif
13015 + .endm
13016 +
13017 + .macro pax_enter_kernel
13018 +#ifdef CONFIG_PAX_KERNEXEC
13019 + call pax_enter_kernel
13020 +#endif
13021 + .endm
13022 +
13023 + .macro pax_exit_kernel
13024 +#ifdef CONFIG_PAX_KERNEXEC
13025 + call pax_exit_kernel
13026 +#endif
13027 + .endm
13028 +
13029 +#ifdef CONFIG_PAX_KERNEXEC
13030 +ENTRY(pax_enter_kernel)
13031 + pushq %rdi
13032 +
13033 +#ifdef CONFIG_PARAVIRT
13034 + PV_SAVE_REGS(CLBR_RDI)
13035 +#endif
13036 +
13037 + GET_CR0_INTO_RDI
13038 + bts $16,%rdi
13039 + jnc 1f
13040 + mov %cs,%edi
13041 + cmp $__KERNEL_CS,%edi
13042 + jz 3f
13043 + ljmpq __KERNEL_CS,3f
13044 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13045 +2: SET_RDI_INTO_CR0
13046 +3:
13047 +
13048 +#ifdef CONFIG_PARAVIRT
13049 + PV_RESTORE_REGS(CLBR_RDI)
13050 +#endif
13051 +
13052 + popq %rdi
13053 + retq
13054 +ENDPROC(pax_enter_kernel)
13055 +
13056 +ENTRY(pax_exit_kernel)
13057 + pushq %rdi
13058 +
13059 +#ifdef CONFIG_PARAVIRT
13060 + PV_SAVE_REGS(CLBR_RDI)
13061 +#endif
13062 +
13063 + mov %cs,%rdi
13064 + cmp $__KERNEXEC_KERNEL_CS,%edi
13065 + jnz 2f
13066 + GET_CR0_INTO_RDI
13067 + btr $16,%rdi
13068 + ljmpq __KERNEL_CS,1f
13069 +1: SET_RDI_INTO_CR0
13070 +2:
13071 +
13072 +#ifdef CONFIG_PARAVIRT
13073 + PV_RESTORE_REGS(CLBR_RDI);
13074 +#endif
13075 +
13076 + popq %rdi
13077 + retq
13078 +ENDPROC(pax_exit_kernel)
13079 +#endif
13080 +
13081 + .macro pax_enter_kernel_user
13082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13083 + call pax_enter_kernel_user
13084 +#endif
13085 + .endm
13086 +
13087 + .macro pax_exit_kernel_user
13088 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13089 + call pax_exit_kernel_user
13090 +#endif
13091 +#ifdef CONFIG_PAX_RANDKSTACK
13092 + push %rax
13093 + call pax_randomize_kstack
13094 + pop %rax
13095 +#endif
13096 + pax_erase_kstack
13097 + .endm
13098 +
13099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13100 +ENTRY(pax_enter_kernel_user)
13101 + pushq %rdi
13102 + pushq %rbx
13103 +
13104 +#ifdef CONFIG_PARAVIRT
13105 + PV_SAVE_REGS(CLBR_RDI)
13106 +#endif
13107 +
13108 + GET_CR3_INTO_RDI
13109 + mov %rdi,%rbx
13110 + add $__START_KERNEL_map,%rbx
13111 + sub phys_base(%rip),%rbx
13112 +
13113 +#ifdef CONFIG_PARAVIRT
13114 + pushq %rdi
13115 + cmpl $0, pv_info+PARAVIRT_enabled
13116 + jz 1f
13117 + i = 0
13118 + .rept USER_PGD_PTRS
13119 + mov i*8(%rbx),%rsi
13120 + mov $0,%sil
13121 + lea i*8(%rbx),%rdi
13122 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13123 + i = i + 1
13124 + .endr
13125 + jmp 2f
13126 +1:
13127 +#endif
13128 +
13129 + i = 0
13130 + .rept USER_PGD_PTRS
13131 + movb $0,i*8(%rbx)
13132 + i = i + 1
13133 + .endr
13134 +
13135 +#ifdef CONFIG_PARAVIRT
13136 +2: popq %rdi
13137 +#endif
13138 + SET_RDI_INTO_CR3
13139 +
13140 +#ifdef CONFIG_PAX_KERNEXEC
13141 + GET_CR0_INTO_RDI
13142 + bts $16,%rdi
13143 + SET_RDI_INTO_CR0
13144 +#endif
13145 +
13146 +#ifdef CONFIG_PARAVIRT
13147 + PV_RESTORE_REGS(CLBR_RDI)
13148 +#endif
13149 +
13150 + popq %rbx
13151 + popq %rdi
13152 + retq
13153 +ENDPROC(pax_enter_kernel_user)
13154 +
13155 +ENTRY(pax_exit_kernel_user)
13156 + push %rdi
13157 +
13158 +#ifdef CONFIG_PARAVIRT
13159 + pushq %rbx
13160 + PV_SAVE_REGS(CLBR_RDI)
13161 +#endif
13162 +
13163 +#ifdef CONFIG_PAX_KERNEXEC
13164 + GET_CR0_INTO_RDI
13165 + btr $16,%rdi
13166 + SET_RDI_INTO_CR0
13167 +#endif
13168 +
13169 + GET_CR3_INTO_RDI
13170 + add $__START_KERNEL_map,%rdi
13171 + sub phys_base(%rip),%rdi
13172 +
13173 +#ifdef CONFIG_PARAVIRT
13174 + cmpl $0, pv_info+PARAVIRT_enabled
13175 + jz 1f
13176 + mov %rdi,%rbx
13177 + i = 0
13178 + .rept USER_PGD_PTRS
13179 + mov i*8(%rbx),%rsi
13180 + mov $0x67,%sil
13181 + lea i*8(%rbx),%rdi
13182 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13183 + i = i + 1
13184 + .endr
13185 + jmp 2f
13186 +1:
13187 +#endif
13188 +
13189 + i = 0
13190 + .rept USER_PGD_PTRS
13191 + movb $0x67,i*8(%rdi)
13192 + i = i + 1
13193 + .endr
13194 +
13195 +#ifdef CONFIG_PARAVIRT
13196 +2: PV_RESTORE_REGS(CLBR_RDI)
13197 + popq %rbx
13198 +#endif
13199 +
13200 + popq %rdi
13201 + retq
13202 +ENDPROC(pax_exit_kernel_user)
13203 +#endif
13204 +
13205 +.macro pax_erase_kstack
13206 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13207 + call pax_erase_kstack
13208 +#endif
13209 +.endm
13210 +
13211 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13212 +/*
13213 + * r10: thread_info
13214 + * rcx, rdx: can be clobbered
13215 + */
13216 +ENTRY(pax_erase_kstack)
13217 + pushq %rdi
13218 + pushq %rax
13219 +
13220 + GET_THREAD_INFO(%r10)
13221 + mov TI_lowest_stack(%r10), %rdi
13222 + mov $-0xBEEF, %rax
13223 + std
13224 +
13225 +1: mov %edi, %ecx
13226 + and $THREAD_SIZE_asm - 1, %ecx
13227 + shr $3, %ecx
13228 + repne scasq
13229 + jecxz 2f
13230 +
13231 + cmp $2*8, %ecx
13232 + jc 2f
13233 +
13234 + mov $2*8, %ecx
13235 + repe scasq
13236 + jecxz 2f
13237 + jne 1b
13238 +
13239 +2: cld
13240 + mov %esp, %ecx
13241 + sub %edi, %ecx
13242 + shr $3, %ecx
13243 + rep stosq
13244 +
13245 + mov TI_task_thread_sp0(%r10), %rdi
13246 + sub $256, %rdi
13247 + mov %rdi, TI_lowest_stack(%r10)
13248 +
13249 + popq %rax
13250 + popq %rdi
13251 + ret
13252 +ENDPROC(pax_erase_kstack)
13253 +#endif
13254
13255 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13256 #ifdef CONFIG_TRACE_IRQFLAGS
13257 @@ -317,7 +569,7 @@ ENTRY(save_args)
13258 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13259 movq_cfi rbp, 8 /* push %rbp */
13260 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13261 - testl $3, CS(%rdi)
13262 + testb $3, CS(%rdi)
13263 je 1f
13264 SWAPGS
13265 /*
13266 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13267
13268 RESTORE_REST
13269
13270 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13271 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13272 je int_ret_from_sys_call
13273
13274 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13275 @@ -455,7 +707,7 @@ END(ret_from_fork)
13276 ENTRY(system_call)
13277 CFI_STARTPROC simple
13278 CFI_SIGNAL_FRAME
13279 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13280 + CFI_DEF_CFA rsp,0
13281 CFI_REGISTER rip,rcx
13282 /*CFI_REGISTER rflags,r11*/
13283 SWAPGS_UNSAFE_STACK
13284 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13285
13286 movq %rsp,PER_CPU_VAR(old_rsp)
13287 movq PER_CPU_VAR(kernel_stack),%rsp
13288 + pax_enter_kernel_user
13289 /*
13290 * No need to follow this irqs off/on section - it's straight
13291 * and short:
13292 */
13293 ENABLE_INTERRUPTS(CLBR_NONE)
13294 - SAVE_ARGS 8,1
13295 + SAVE_ARGS 8*6,1
13296 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13297 movq %rcx,RIP-ARGOFFSET(%rsp)
13298 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13299 @@ -502,6 +755,7 @@ sysret_check:
13300 andl %edi,%edx
13301 jnz sysret_careful
13302 CFI_REMEMBER_STATE
13303 + pax_exit_kernel_user
13304 /*
13305 * sysretq will re-enable interrupts:
13306 */
13307 @@ -562,6 +816,9 @@ auditsys:
13308 movq %rax,%rsi /* 2nd arg: syscall number */
13309 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13310 call audit_syscall_entry
13311 +
13312 + pax_erase_kstack
13313 +
13314 LOAD_ARGS 0 /* reload call-clobbered registers */
13315 jmp system_call_fastpath
13316
13317 @@ -592,6 +849,9 @@ tracesys:
13318 FIXUP_TOP_OF_STACK %rdi
13319 movq %rsp,%rdi
13320 call syscall_trace_enter
13321 +
13322 + pax_erase_kstack
13323 +
13324 /*
13325 * Reload arg registers from stack in case ptrace changed them.
13326 * We don't reload %rax because syscall_trace_enter() returned
13327 @@ -613,7 +873,7 @@ tracesys:
13328 GLOBAL(int_ret_from_sys_call)
13329 DISABLE_INTERRUPTS(CLBR_NONE)
13330 TRACE_IRQS_OFF
13331 - testl $3,CS-ARGOFFSET(%rsp)
13332 + testb $3,CS-ARGOFFSET(%rsp)
13333 je retint_restore_args
13334 movl $_TIF_ALLWORK_MASK,%edi
13335 /* edi: mask to check */
13336 @@ -800,6 +1060,16 @@ END(interrupt)
13337 CFI_ADJUST_CFA_OFFSET 10*8
13338 call save_args
13339 PARTIAL_FRAME 0
13340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13341 + testb $3, CS(%rdi)
13342 + jnz 1f
13343 + pax_enter_kernel
13344 + jmp 2f
13345 +1: pax_enter_kernel_user
13346 +2:
13347 +#else
13348 + pax_enter_kernel
13349 +#endif
13350 call \func
13351 .endm
13352
13353 @@ -822,7 +1092,7 @@ ret_from_intr:
13354 CFI_ADJUST_CFA_OFFSET -8
13355 exit_intr:
13356 GET_THREAD_INFO(%rcx)
13357 - testl $3,CS-ARGOFFSET(%rsp)
13358 + testb $3,CS-ARGOFFSET(%rsp)
13359 je retint_kernel
13360
13361 /* Interrupt came from user space */
13362 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13363 * The iretq could re-enable interrupts:
13364 */
13365 DISABLE_INTERRUPTS(CLBR_ANY)
13366 + pax_exit_kernel_user
13367 TRACE_IRQS_IRETQ
13368 SWAPGS
13369 jmp restore_args
13370
13371 retint_restore_args: /* return to kernel space */
13372 DISABLE_INTERRUPTS(CLBR_ANY)
13373 + pax_exit_kernel
13374 /*
13375 * The iretq could re-enable interrupts:
13376 */
13377 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13378 CFI_ADJUST_CFA_OFFSET 15*8
13379 call error_entry
13380 DEFAULT_FRAME 0
13381 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13382 + testb $3, CS(%rsp)
13383 + jnz 1f
13384 + pax_enter_kernel
13385 + jmp 2f
13386 +1: pax_enter_kernel_user
13387 +2:
13388 +#else
13389 + pax_enter_kernel
13390 +#endif
13391 movq %rsp,%rdi /* pt_regs pointer */
13392 xorl %esi,%esi /* no error code */
13393 call \do_sym
13394 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13395 subq $15*8, %rsp
13396 call save_paranoid
13397 TRACE_IRQS_OFF
13398 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13399 + testb $3, CS(%rsp)
13400 + jnz 1f
13401 + pax_enter_kernel
13402 + jmp 2f
13403 +1: pax_enter_kernel_user
13404 +2:
13405 +#else
13406 + pax_enter_kernel
13407 +#endif
13408 movq %rsp,%rdi /* pt_regs pointer */
13409 xorl %esi,%esi /* no error code */
13410 call \do_sym
13411 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13412 subq $15*8, %rsp
13413 call save_paranoid
13414 TRACE_IRQS_OFF
13415 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13416 + testb $3, CS(%rsp)
13417 + jnz 1f
13418 + pax_enter_kernel
13419 + jmp 2f
13420 +1: pax_enter_kernel_user
13421 +2:
13422 +#else
13423 + pax_enter_kernel
13424 +#endif
13425 movq %rsp,%rdi /* pt_regs pointer */
13426 xorl %esi,%esi /* no error code */
13427 - PER_CPU(init_tss, %rbp)
13428 +#ifdef CONFIG_SMP
13429 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13430 + lea init_tss(%rbp), %rbp
13431 +#else
13432 + lea init_tss(%rip), %rbp
13433 +#endif
13434 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13435 call \do_sym
13436 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13437 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13438 CFI_ADJUST_CFA_OFFSET 15*8
13439 call error_entry
13440 DEFAULT_FRAME 0
13441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13442 + testb $3, CS(%rsp)
13443 + jnz 1f
13444 + pax_enter_kernel
13445 + jmp 2f
13446 +1: pax_enter_kernel_user
13447 +2:
13448 +#else
13449 + pax_enter_kernel
13450 +#endif
13451 movq %rsp,%rdi /* pt_regs pointer */
13452 movq ORIG_RAX(%rsp),%rsi /* get error code */
13453 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13454 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13455 call save_paranoid
13456 DEFAULT_FRAME 0
13457 TRACE_IRQS_OFF
13458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13459 + testb $3, CS(%rsp)
13460 + jnz 1f
13461 + pax_enter_kernel
13462 + jmp 2f
13463 +1: pax_enter_kernel_user
13464 +2:
13465 +#else
13466 + pax_enter_kernel
13467 +#endif
13468 movq %rsp,%rdi /* pt_regs pointer */
13469 movq ORIG_RAX(%rsp),%rsi /* get error code */
13470 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13471 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13472 TRACE_IRQS_OFF
13473 testl %ebx,%ebx /* swapgs needed? */
13474 jnz paranoid_restore
13475 - testl $3,CS(%rsp)
13476 + testb $3,CS(%rsp)
13477 jnz paranoid_userspace
13478 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13479 + pax_exit_kernel
13480 + TRACE_IRQS_IRETQ 0
13481 + SWAPGS_UNSAFE_STACK
13482 + RESTORE_ALL 8
13483 + jmp irq_return
13484 +#endif
13485 paranoid_swapgs:
13486 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13487 + pax_exit_kernel_user
13488 +#else
13489 + pax_exit_kernel
13490 +#endif
13491 TRACE_IRQS_IRETQ 0
13492 SWAPGS_UNSAFE_STACK
13493 RESTORE_ALL 8
13494 jmp irq_return
13495 paranoid_restore:
13496 + pax_exit_kernel
13497 TRACE_IRQS_IRETQ 0
13498 RESTORE_ALL 8
13499 jmp irq_return
13500 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13501 movq_cfi r14, R14+8
13502 movq_cfi r15, R15+8
13503 xorl %ebx,%ebx
13504 - testl $3,CS+8(%rsp)
13505 + testb $3,CS+8(%rsp)
13506 je error_kernelspace
13507 error_swapgs:
13508 SWAPGS
13509 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13510 CFI_ADJUST_CFA_OFFSET 15*8
13511 call save_paranoid
13512 DEFAULT_FRAME 0
13513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13514 + testb $3, CS(%rsp)
13515 + jnz 1f
13516 + pax_enter_kernel
13517 + jmp 2f
13518 +1: pax_enter_kernel_user
13519 +2:
13520 +#else
13521 + pax_enter_kernel
13522 +#endif
13523 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13524 movq %rsp,%rdi
13525 movq $-1,%rsi
13526 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13527 DISABLE_INTERRUPTS(CLBR_NONE)
13528 testl %ebx,%ebx /* swapgs needed? */
13529 jnz nmi_restore
13530 - testl $3,CS(%rsp)
13531 + testb $3,CS(%rsp)
13532 jnz nmi_userspace
13533 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13534 + pax_exit_kernel
13535 + SWAPGS_UNSAFE_STACK
13536 + RESTORE_ALL 8
13537 + jmp irq_return
13538 +#endif
13539 nmi_swapgs:
13540 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13541 + pax_exit_kernel_user
13542 +#else
13543 + pax_exit_kernel
13544 +#endif
13545 SWAPGS_UNSAFE_STACK
13546 + RESTORE_ALL 8
13547 + jmp irq_return
13548 nmi_restore:
13549 + pax_exit_kernel
13550 RESTORE_ALL 8
13551 jmp irq_return
13552 nmi_userspace:
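The CONFIG_PAX_MEMORY_UDEREF helpers above implement the amd64 variant by blanking the low byte of every user page-global-directory entry on kernel entry, so userland is simply not mapped while the kernel runs, and writing the 0x67 flag byte (Present plus RW/User/Accessed/Dirty) back on the way out. A toy model of that byte flipping on an in-memory table; the entry count and values are invented, and the byte addressing assumes little-endian x86:

#include <stdio.h>

#define USER_PGD_PTRS 4   /* toy count; the real number covers the user half */

typedef unsigned long pgd_t;

/* Clearing the low byte drops the Present bit, so every user mapping
 * disappears while the kernel runs. */
static void close_user_pgds(pgd_t *pgd)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		((unsigned char *)&pgd[i])[0] = 0;
}

/* Writing 0x67 back sets Present|RW|User|Accessed|Dirty again, the byte the
 * patch stores with "movb $0x67,i*8(%rdi)". */
static void open_user_pgds(pgd_t *pgd)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		((unsigned char *)&pgd[i])[0] = 0x67;
}

int main(void)
{
	pgd_t user_pgd[USER_PGD_PTRS] = { 0x1067UL, 0x2067UL, 0x3067UL, 0x4067UL };

	close_user_pgds(user_pgd);
	printf("in kernel:  pgd[0]=%#lx (Present bit clear)\n", user_pgd[0]);

	open_user_pgds(user_pgd);
	printf("back out:   pgd[0]=%#lx (Present again)\n", user_pgd[0]);
	return 0;
}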
13553 diff -urNp linux-2.6.32.41/arch/x86/kernel/ftrace.c linux-2.6.32.41/arch/x86/kernel/ftrace.c
13554 --- linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13555 +++ linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13556 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13557 static void *mod_code_newcode; /* holds the text to write to the IP */
13558
13559 static unsigned nmi_wait_count;
13560 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13561 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13562
13563 int ftrace_arch_read_dyn_info(char *buf, int size)
13564 {
13565 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13566
13567 r = snprintf(buf, size, "%u %u",
13568 nmi_wait_count,
13569 - atomic_read(&nmi_update_count));
13570 + atomic_read_unchecked(&nmi_update_count));
13571 return r;
13572 }
13573
13574 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13575 {
13576 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13577 smp_rmb();
13578 + pax_open_kernel();
13579 ftrace_mod_code();
13580 - atomic_inc(&nmi_update_count);
13581 + pax_close_kernel();
13582 + atomic_inc_unchecked(&nmi_update_count);
13583 }
13584 /* Must have previous changes seen before executions */
13585 smp_mb();
13586 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13587
13588
13589
13590 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13591 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13592
13593 static unsigned char *ftrace_nop_replace(void)
13594 {
13595 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13596 {
13597 unsigned char replaced[MCOUNT_INSN_SIZE];
13598
13599 + ip = ktla_ktva(ip);
13600 +
13601 /*
13602 * Note: Due to modules and __init, code can
13603 * disappear and change, we need to protect against faulting
13604 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13605 unsigned char old[MCOUNT_INSN_SIZE], *new;
13606 int ret;
13607
13608 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13609 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13610 new = ftrace_call_replace(ip, (unsigned long)func);
13611 ret = ftrace_modify_code(ip, old, new);
13612
13613 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13614 switch (faulted) {
13615 case 0:
13616 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13617 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13618 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13619 break;
13620 case 1:
13621 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13622 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13623 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13624 break;
13625 case 2:
13626 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13627 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13628 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13629 break;
13630 }
13631
13632 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13633 {
13634 unsigned char code[MCOUNT_INSN_SIZE];
13635
13636 + ip = ktla_ktva(ip);
13637 +
13638 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13639 return -EFAULT;
13640
13641 diff -urNp linux-2.6.32.41/arch/x86/kernel/head32.c linux-2.6.32.41/arch/x86/kernel/head32.c
13642 --- linux-2.6.32.41/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13643 +++ linux-2.6.32.41/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13644 @@ -16,6 +16,7 @@
13645 #include <asm/apic.h>
13646 #include <asm/io_apic.h>
13647 #include <asm/bios_ebda.h>
13648 +#include <asm/boot.h>
13649
13650 static void __init i386_default_early_setup(void)
13651 {
13652 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13653 {
13654 reserve_trampoline_memory();
13655
13656 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13657 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13658
13659 #ifdef CONFIG_BLK_DEV_INITRD
13660 /* Reserve INITRD */
13661 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_32.S linux-2.6.32.41/arch/x86/kernel/head_32.S
13662 --- linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13663 +++ linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13664 @@ -19,10 +19,17 @@
13665 #include <asm/setup.h>
13666 #include <asm/processor-flags.h>
13667 #include <asm/percpu.h>
13668 +#include <asm/msr-index.h>
13669
13670 /* Physical address */
13671 #define pa(X) ((X) - __PAGE_OFFSET)
13672
13673 +#ifdef CONFIG_PAX_KERNEXEC
13674 +#define ta(X) (X)
13675 +#else
13676 +#define ta(X) ((X) - __PAGE_OFFSET)
13677 +#endif
13678 +
13679 /*
13680 * References to members of the new_cpu_data structure.
13681 */
13682 @@ -52,11 +59,7 @@
13683 * and small than max_low_pfn, otherwise will waste some page table entries
13684 */
13685
13686 -#if PTRS_PER_PMD > 1
13687 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13688 -#else
13689 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13690 -#endif
13691 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13692
13693 /* Enough space to fit pagetables for the low memory linear map */
13694 MAPPING_BEYOND_END = \
13695 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13696 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13697
13698 /*
13699 + * Real beginning of normal "text" segment
13700 + */
13701 +ENTRY(stext)
13702 +ENTRY(_stext)
13703 +
13704 +/*
13705 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13706 * %esi points to the real-mode code as a 32-bit pointer.
13707 * CS and DS must be 4 GB flat segments, but we don't depend on
13708 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13709 * can.
13710 */
13711 __HEAD
13712 +
13713 +#ifdef CONFIG_PAX_KERNEXEC
13714 + jmp startup_32
13715 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13716 +.fill PAGE_SIZE-5,1,0xcc
13717 +#endif
13718 +
13719 ENTRY(startup_32)
13720 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13721 us to not reload segments */
13722 @@ -97,6 +113,57 @@ ENTRY(startup_32)
13723 movl %eax,%gs
13724 2:
13725
13726 +#ifdef CONFIG_SMP
13727 + movl $pa(cpu_gdt_table),%edi
13728 + movl $__per_cpu_load,%eax
13729 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13730 + rorl $16,%eax
13731 + movb %al,__KERNEL_PERCPU + 4(%edi)
13732 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13733 + movl $__per_cpu_end - 1,%eax
13734 + subl $__per_cpu_start,%eax
13735 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13736 +#endif
13737 +
13738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13739 + movl $NR_CPUS,%ecx
13740 + movl $pa(cpu_gdt_table),%edi
13741 +1:
13742 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13743 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13744 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13745 + addl $PAGE_SIZE_asm,%edi
13746 + loop 1b
13747 +#endif
13748 +
13749 +#ifdef CONFIG_PAX_KERNEXEC
13750 + movl $pa(boot_gdt),%edi
13751 + movl $__LOAD_PHYSICAL_ADDR,%eax
13752 + movw %ax,__BOOT_CS + 2(%edi)
13753 + rorl $16,%eax
13754 + movb %al,__BOOT_CS + 4(%edi)
13755 + movb %ah,__BOOT_CS + 7(%edi)
13756 + rorl $16,%eax
13757 +
13758 + ljmp $(__BOOT_CS),$1f
13759 +1:
13760 +
13761 + movl $NR_CPUS,%ecx
13762 + movl $pa(cpu_gdt_table),%edi
13763 + addl $__PAGE_OFFSET,%eax
13764 +1:
13765 + movw %ax,__KERNEL_CS + 2(%edi)
13766 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13767 + rorl $16,%eax
13768 + movb %al,__KERNEL_CS + 4(%edi)
13769 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13770 + movb %ah,__KERNEL_CS + 7(%edi)
13771 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13772 + rorl $16,%eax
13773 + addl $PAGE_SIZE_asm,%edi
13774 + loop 1b
13775 +#endif
13776 +
13777 /*
13778 * Clear BSS first so that there are no surprises...
13779 */
13780 @@ -140,9 +207,7 @@ ENTRY(startup_32)
13781 cmpl $num_subarch_entries, %eax
13782 jae bad_subarch
13783
13784 - movl pa(subarch_entries)(,%eax,4), %eax
13785 - subl $__PAGE_OFFSET, %eax
13786 - jmp *%eax
13787 + jmp *pa(subarch_entries)(,%eax,4)
13788
13789 bad_subarch:
13790 WEAK(lguest_entry)
13791 @@ -154,10 +219,10 @@ WEAK(xen_entry)
13792 __INITDATA
13793
13794 subarch_entries:
13795 - .long default_entry /* normal x86/PC */
13796 - .long lguest_entry /* lguest hypervisor */
13797 - .long xen_entry /* Xen hypervisor */
13798 - .long default_entry /* Moorestown MID */
13799 + .long ta(default_entry) /* normal x86/PC */
13800 + .long ta(lguest_entry) /* lguest hypervisor */
13801 + .long ta(xen_entry) /* Xen hypervisor */
13802 + .long ta(default_entry) /* Moorestown MID */
13803 num_subarch_entries = (. - subarch_entries) / 4
13804 .previous
13805 #endif /* CONFIG_PARAVIRT */
13806 @@ -218,8 +283,11 @@ default_entry:
13807 movl %eax, pa(max_pfn_mapped)
13808
13809 /* Do early initialization of the fixmap area */
13810 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13811 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13812 +#ifdef CONFIG_COMPAT_VDSO
13813 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13814 +#else
13815 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13816 +#endif
13817 #else /* Not PAE */
13818
13819 page_pde_offset = (__PAGE_OFFSET >> 20);
13820 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13821 movl %eax, pa(max_pfn_mapped)
13822
13823 /* Do early initialization of the fixmap area */
13824 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13825 - movl %eax,pa(swapper_pg_dir+0xffc)
13826 +#ifdef CONFIG_COMPAT_VDSO
13827 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13828 +#else
13829 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13830 +#endif
13831 #endif
13832 jmp 3f
13833 /*
13834 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13835 orl %edx,%eax
13836 movl %eax,%cr4
13837
13838 +#ifdef CONFIG_X86_PAE
13839 btl $5, %eax # check if PAE is enabled
13840 jnc 6f
13841
13842 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13843 jnc 6f
13844
13845 /* Setup EFER (Extended Feature Enable Register) */
13846 - movl $0xc0000080, %ecx
13847 + movl $MSR_EFER, %ecx
13848 rdmsr
13849
13850 btsl $11, %eax
13851 /* Make changes effective */
13852 wrmsr
13853
13854 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13855 + movl $1,pa(nx_enabled)
13856 +#endif
13857 +
13858 6:
13859
13860 /*
13861 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13862
13863 #ifdef CONFIG_SMP
13864 cmpb $0, ready
13865 - jz 1f /* Initial CPU cleans BSS */
13866 - jmp checkCPUtype
13867 -1:
13868 + jnz checkCPUtype /* Initial CPU cleans BSS */
13869 #endif /* CONFIG_SMP */
13870
13871 /*
13872 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
13873 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13874 movl %eax,%ss # after changing gdt.
13875
13876 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
13877 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13878 movl %eax,%ds
13879 movl %eax,%es
13880
13881 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
13882 */
13883 cmpb $0,ready
13884 jne 1f
13885 - movl $per_cpu__gdt_page,%eax
13886 + movl $cpu_gdt_table,%eax
13887 movl $per_cpu__stack_canary,%ecx
13888 +#ifdef CONFIG_SMP
13889 + addl $__per_cpu_load,%ecx
13890 +#endif
13891 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13892 shrl $16, %ecx
13893 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13894 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13895 1:
13896 -#endif
13897 movl $(__KERNEL_STACK_CANARY),%eax
13898 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13899 + movl $(__USER_DS),%eax
13900 +#else
13901 + xorl %eax,%eax
13902 +#endif
13903 movl %eax,%gs
13904
13905 xorl %eax,%eax # Clear LDT
13906 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
13907 #ifdef CONFIG_SMP
13908 movb ready, %cl
13909 movb $1, ready
13910 - cmpb $0,%cl # the first CPU calls start_kernel
13911 - je 1f
13912 - movl (stack_start), %esp
13913 -1:
13914 #endif /* CONFIG_SMP */
13915 jmp *(initial_code)
13916
13917 @@ -546,22 +623,22 @@ early_page_fault:
13918 jmp early_fault
13919
13920 early_fault:
13921 - cld
13922 #ifdef CONFIG_PRINTK
13923 + cmpl $1,%ss:early_recursion_flag
13924 + je hlt_loop
13925 + incl %ss:early_recursion_flag
13926 + cld
13927 pusha
13928 movl $(__KERNEL_DS),%eax
13929 movl %eax,%ds
13930 movl %eax,%es
13931 - cmpl $2,early_recursion_flag
13932 - je hlt_loop
13933 - incl early_recursion_flag
13934 movl %cr2,%eax
13935 pushl %eax
13936 pushl %edx /* trapno */
13937 pushl $fault_msg
13938 call printk
13939 +; call dump_stack
13940 #endif
13941 - call dump_stack
13942 hlt_loop:
13943 hlt
13944 jmp hlt_loop
13945 @@ -569,8 +646,11 @@ hlt_loop:
13946 /* This is the default interrupt "handler" :-) */
13947 ALIGN
13948 ignore_int:
13949 - cld
13950 #ifdef CONFIG_PRINTK
13951 + cmpl $2,%ss:early_recursion_flag
13952 + je hlt_loop
13953 + incl %ss:early_recursion_flag
13954 + cld
13955 pushl %eax
13956 pushl %ecx
13957 pushl %edx
13958 @@ -579,9 +659,6 @@ ignore_int:
13959 movl $(__KERNEL_DS),%eax
13960 movl %eax,%ds
13961 movl %eax,%es
13962 - cmpl $2,early_recursion_flag
13963 - je hlt_loop
13964 - incl early_recursion_flag
13965 pushl 16(%esp)
13966 pushl 24(%esp)
13967 pushl 32(%esp)
13968 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
13969 /*
13970 * BSS section
13971 */
13972 -__PAGE_ALIGNED_BSS
13973 - .align PAGE_SIZE_asm
13974 #ifdef CONFIG_X86_PAE
13975 +.section .swapper_pg_pmd,"a",@progbits
13976 swapper_pg_pmd:
13977 .fill 1024*KPMDS,4,0
13978 #else
13979 +.section .swapper_pg_dir,"a",@progbits
13980 ENTRY(swapper_pg_dir)
13981 .fill 1024,4,0
13982 #endif
13983 +.section .swapper_pg_fixmap,"a",@progbits
13984 swapper_pg_fixmap:
13985 .fill 1024,4,0
13986 #ifdef CONFIG_X86_TRAMPOLINE
13987 +.section .trampoline_pg_dir,"a",@progbits
13988 ENTRY(trampoline_pg_dir)
13989 +#ifdef CONFIG_X86_PAE
13990 + .fill 4,8,0
13991 +#else
13992 .fill 1024,4,0
13993 #endif
13994 +#endif
13995 +
13996 +.section .empty_zero_page,"a",@progbits
13997 ENTRY(empty_zero_page)
13998 .fill 4096,1,0
13999
14000 /*
14001 + * The IDT has to be page-aligned to simplify the Pentium
14002 + * F0 0F bug workaround.. We have a special link segment
14003 + * for this.
14004 + */
14005 +.section .idt,"a",@progbits
14006 +ENTRY(idt_table)
14007 + .fill 256,8,0
14008 +
14009 +/*
14010 * This starts the data section.
14011 */
14012 #ifdef CONFIG_X86_PAE
14013 -__PAGE_ALIGNED_DATA
14014 - /* Page-aligned for the benefit of paravirt? */
14015 - .align PAGE_SIZE_asm
14016 +.section .swapper_pg_dir,"a",@progbits
14017 +
14018 ENTRY(swapper_pg_dir)
14019 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14020 # if KPMDS == 3
14021 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14022 # error "Kernel PMDs should be 1, 2 or 3"
14023 # endif
14024 .align PAGE_SIZE_asm /* needs to be page-sized too */
14025 +
14026 +#ifdef CONFIG_PAX_PER_CPU_PGD
14027 +ENTRY(cpu_pgd)
14028 + .rept NR_CPUS
14029 + .fill 4,8,0
14030 + .endr
14031 +#endif
14032 +
14033 #endif
14034
14035 .data
14036 ENTRY(stack_start)
14037 - .long init_thread_union+THREAD_SIZE
14038 + .long init_thread_union+THREAD_SIZE-8
14039 .long __BOOT_DS
14040
14041 ready: .byte 0
14042
14043 +.section .rodata,"a",@progbits
14044 early_recursion_flag:
14045 .long 0
14046
14047 @@ -697,7 +799,7 @@ fault_msg:
14048 .word 0 # 32 bit align gdt_desc.address
14049 boot_gdt_descr:
14050 .word __BOOT_DS+7
14051 - .long boot_gdt - __PAGE_OFFSET
14052 + .long pa(boot_gdt)
14053
14054 .word 0 # 32-bit align idt_desc.address
14055 idt_descr:
14056 @@ -708,7 +810,7 @@ idt_descr:
14057 .word 0 # 32 bit align gdt_desc.address
14058 ENTRY(early_gdt_descr)
14059 .word GDT_ENTRIES*8-1
14060 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14061 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14062
14063 /*
14064 * The boot_gdt must mirror the equivalent in setup.S and is
14065 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14066 .align L1_CACHE_BYTES
14067 ENTRY(boot_gdt)
14068 .fill GDT_ENTRY_BOOT_CS,8,0
14069 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14070 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14071 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14072 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14073 +
14074 + .align PAGE_SIZE_asm
14075 +ENTRY(cpu_gdt_table)
14076 + .rept NR_CPUS
14077 + .quad 0x0000000000000000 /* NULL descriptor */
14078 + .quad 0x0000000000000000 /* 0x0b reserved */
14079 + .quad 0x0000000000000000 /* 0x13 reserved */
14080 + .quad 0x0000000000000000 /* 0x1b reserved */
14081 +
14082 +#ifdef CONFIG_PAX_KERNEXEC
14083 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14084 +#else
14085 + .quad 0x0000000000000000 /* 0x20 unused */
14086 +#endif
14087 +
14088 + .quad 0x0000000000000000 /* 0x28 unused */
14089 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14090 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14091 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14092 + .quad 0x0000000000000000 /* 0x4b reserved */
14093 + .quad 0x0000000000000000 /* 0x53 reserved */
14094 + .quad 0x0000000000000000 /* 0x5b reserved */
14095 +
14096 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14097 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14098 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14099 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14100 +
14101 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14102 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14103 +
14104 + /*
14105 + * Segments used for calling PnP BIOS have byte granularity.
14106 + * The code segments and data segments have fixed 64k limits,
14107 + * the transfer segment sizes are set at run time.
14108 + */
14109 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14110 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14111 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14112 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14113 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14114 +
14115 + /*
14116 + * The APM segments have byte granularity and their bases
14117 + * are set at run time. All have 64k limits.
14118 + */
14119 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14120 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14121 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14122 +
14123 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14124 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14125 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14126 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14127 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14128 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14129 +
14130 + /* Be sure this is zeroed to avoid false validations in Xen */
14131 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14132 + .endr
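head_32.S above replaces the per-cpu gdt_page with a statically allocated cpu_gdt_table: NR_CPUS page-sized descriptor tables placed back to back, so a CPU finds its own GDT by adding cpu_number shifted by PAGE_SHIFT to the table base, the same lookup the SMP hunks in entry_32.S switched to. A small sketch of that addressing with toy sizes:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS   4        /* toy CPU count */
#define PAGE_SIZE 4096

struct desc {
	uint64_t raw;      /* one 8-byte segment descriptor */
};

/* One page-sized GDT per CPU, laid out back to back (the real table is also
 * page aligned; alignment is omitted in this sketch). */
static struct desc cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc)];

/* The C equivalent of "shll $PAGE_SHIFT_asm, %ebx; addl $cpu_gdt_table, %ebx":
 * CPU n's table starts n pages past the base. */
static struct desc *get_cpu_gdt(unsigned int cpu)
{
	return (struct desc *)((char *)cpu_gdt_table + (size_t)cpu * PAGE_SIZE);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u gdt at offset %#zx\n", cpu,
		       (size_t)((char *)get_cpu_gdt(cpu) - (char *)cpu_gdt_table));
	return 0;
}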
14133 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_64.S linux-2.6.32.41/arch/x86/kernel/head_64.S
14134 --- linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14135 +++ linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14136 @@ -19,6 +19,7 @@
14137 #include <asm/cache.h>
14138 #include <asm/processor-flags.h>
14139 #include <asm/percpu.h>
14140 +#include <asm/cpufeature.h>
14141
14142 #ifdef CONFIG_PARAVIRT
14143 #include <asm/asm-offsets.h>
14144 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14145 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14146 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14147 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14148 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14149 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14150 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14151 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14152
14153 .text
14154 __HEAD
14155 @@ -85,35 +90,22 @@ startup_64:
14156 */
14157 addq %rbp, init_level4_pgt + 0(%rip)
14158 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14159 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14160 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14161 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14162
14163 addq %rbp, level3_ident_pgt + 0(%rip)
14164 +#ifndef CONFIG_XEN
14165 + addq %rbp, level3_ident_pgt + 8(%rip)
14166 +#endif
14167
14168 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14169 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14170 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14171
14172 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14173 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14174 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14175
14176 - /* Add an Identity mapping if I am above 1G */
14177 - leaq _text(%rip), %rdi
14178 - andq $PMD_PAGE_MASK, %rdi
14179 -
14180 - movq %rdi, %rax
14181 - shrq $PUD_SHIFT, %rax
14182 - andq $(PTRS_PER_PUD - 1), %rax
14183 - jz ident_complete
14184 -
14185 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14186 - leaq level3_ident_pgt(%rip), %rbx
14187 - movq %rdx, 0(%rbx, %rax, 8)
14188 -
14189 - movq %rdi, %rax
14190 - shrq $PMD_SHIFT, %rax
14191 - andq $(PTRS_PER_PMD - 1), %rax
14192 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14193 - leaq level2_spare_pgt(%rip), %rbx
14194 - movq %rdx, 0(%rbx, %rax, 8)
14195 -ident_complete:
14196 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14197 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14198
14199 /*
14200 * Fixup the kernel text+data virtual addresses. Note that
14201 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14202 * after the boot processor executes this code.
14203 */
14204
14205 - /* Enable PAE mode and PGE */
14206 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14207 + /* Enable PAE mode and PSE/PGE */
14208 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14209 movq %rax, %cr4
14210
14211 /* Setup early boot stage 4 level pagetables. */
14212 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14213 movl $MSR_EFER, %ecx
14214 rdmsr
14215 btsl $_EFER_SCE, %eax /* Enable System Call */
14216 - btl $20,%edi /* No Execute supported? */
14217 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14218 jnc 1f
14219 btsl $_EFER_NX, %eax
14220 + leaq init_level4_pgt(%rip), %rdi
14221 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14222 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14223 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14224 1: wrmsr /* Make changes effective */
14225
14226 /* Setup cr0 */
14227 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14228 .quad x86_64_start_kernel
14229 ENTRY(initial_gs)
14230 .quad INIT_PER_CPU_VAR(irq_stack_union)
14231 - __FINITDATA
14232
14233 ENTRY(stack_start)
14234 .quad init_thread_union+THREAD_SIZE-8
14235 .word 0
14236 + __FINITDATA
14237
14238 bad_address:
14239 jmp bad_address
14240
14241 - .section ".init.text","ax"
14242 + __INIT
14243 #ifdef CONFIG_EARLY_PRINTK
14244 .globl early_idt_handlers
14245 early_idt_handlers:
14246 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14247 #endif /* EARLY_PRINTK */
14248 1: hlt
14249 jmp 1b
14250 + .previous
14251
14252 #ifdef CONFIG_EARLY_PRINTK
14253 + __INITDATA
14254 early_recursion_flag:
14255 .long 0
14256 + .previous
14257
14258 + .section .rodata,"a",@progbits
14259 early_idt_msg:
14260 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14261 early_idt_ripmsg:
14262 .asciz "RIP %s\n"
14263 -#endif /* CONFIG_EARLY_PRINTK */
14264 .previous
14265 +#endif /* CONFIG_EARLY_PRINTK */
14266
14267 + .section .rodata,"a",@progbits
14268 #define NEXT_PAGE(name) \
14269 .balign PAGE_SIZE; \
14270 ENTRY(name)
14271 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14272 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14273 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14274 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14275 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14276 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14277 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14278 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14279 .org init_level4_pgt + L4_START_KERNEL*8, 0
14280 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14281 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14282
14283 +#ifdef CONFIG_PAX_PER_CPU_PGD
14284 +NEXT_PAGE(cpu_pgd)
14285 + .rept NR_CPUS
14286 + .fill 512,8,0
14287 + .endr
14288 +#endif
14289 +
14290 NEXT_PAGE(level3_ident_pgt)
14291 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14292 +#ifdef CONFIG_XEN
14293 .fill 511,8,0
14294 +#else
14295 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14296 + .fill 510,8,0
14297 +#endif
14298 +
14299 +NEXT_PAGE(level3_vmalloc_pgt)
14300 + .fill 512,8,0
14301 +
14302 +NEXT_PAGE(level3_vmemmap_pgt)
14303 + .fill L3_VMEMMAP_START,8,0
14304 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14305
14306 NEXT_PAGE(level3_kernel_pgt)
14307 .fill L3_START_KERNEL,8,0
14308 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14309 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14310 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14311
14312 +NEXT_PAGE(level2_vmemmap_pgt)
14313 + .fill 512,8,0
14314 +
14315 NEXT_PAGE(level2_fixmap_pgt)
14316 - .fill 506,8,0
14317 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14318 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14319 - .fill 5,8,0
14320 + .fill 507,8,0
14321 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14322 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14323 + .fill 4,8,0
14324
14325 -NEXT_PAGE(level1_fixmap_pgt)
14326 +NEXT_PAGE(level1_vsyscall_pgt)
14327 .fill 512,8,0
14328
14329 -NEXT_PAGE(level2_ident_pgt)
14330 - /* Since I easily can, map the first 1G.
14331 + /* Since I easily can, map the first 2G.
14332 * Don't set NX because code runs from these pages.
14333 */
14334 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14335 +NEXT_PAGE(level2_ident_pgt)
14336 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14337
14338 NEXT_PAGE(level2_kernel_pgt)
14339 /*
14340 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14341 * If you want to increase this then increase MODULES_VADDR
14342 * too.)
14343 */
14344 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14345 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14346 -
14347 -NEXT_PAGE(level2_spare_pgt)
14348 - .fill 512, 8, 0
14349 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14350
14351 #undef PMDS
14352 #undef NEXT_PAGE
14353
14354 - .data
14355 + .align PAGE_SIZE
14356 +ENTRY(cpu_gdt_table)
14357 + .rept NR_CPUS
14358 + .quad 0x0000000000000000 /* NULL descriptor */
14359 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14360 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14361 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14362 + .quad 0x00cffb000000ffff /* __USER32_CS */
14363 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14364 + .quad 0x00affb000000ffff /* __USER_CS */
14365 +
14366 +#ifdef CONFIG_PAX_KERNEXEC
14367 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14368 +#else
14369 + .quad 0x0 /* unused */
14370 +#endif
14371 +
14372 + .quad 0,0 /* TSS */
14373 + .quad 0,0 /* LDT */
14374 + .quad 0,0,0 /* three TLS descriptors */
14375 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14376 + /* asm/segment.h:GDT_ENTRIES must match this */
14377 +
14378 + /* zero the remaining page */
14379 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14380 + .endr
14381 +
14382 .align 16
14383 .globl early_gdt_descr
14384 early_gdt_descr:
14385 .word GDT_ENTRIES*8-1
14386 early_gdt_descr_base:
14387 - .quad INIT_PER_CPU_VAR(gdt_page)
14388 + .quad cpu_gdt_table
14389
14390 ENTRY(phys_base)
14391 /* This must match the first entry in level2_kernel_pgt */
14392 .quad 0x0000000000000000
14393
14394 #include "../../x86/xen/xen-head.S"
14395 -
14396 - .section .bss, "aw", @nobits
14397 +
14398 + .section .rodata,"a",@progbits
14399 .align L1_CACHE_BYTES
14400 ENTRY(idt_table)
14401 - .skip IDT_ENTRIES * 16
14402 + .fill 512,8,0
14403
14404 __PAGE_ALIGNED_BSS
14405 .align PAGE_SIZE
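/*
 * Editor's sketch, not part of the patch: a small userspace C program that
 * decodes the raw 8-byte GDT entries the head_64.S hunk above hard-codes,
 * e.g. 0x00cf9b000000ffff (__KERNEL32_CS) or 0x00af9b000000ffff
 * (__KERNEL_CS).  The bit layout follows the standard x86 segment
 * descriptor format; the three sample values are taken from the table above.
 */
#include <stdio.h>
#include <stdint.h>

static void decode_gdt_entry(uint64_t d)
{
	uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
	uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | ((uint32_t)((d >> 56) & 0xff) << 24);
	unsigned access = (unsigned)(d >> 40) & 0xff;
	unsigned flags  = (unsigned)(d >> 52) & 0xf;
	const char *width = (flags & 0x2) ? "64-bit" : (flags & 0x4) ? "32-bit" : "16-bit";

	if (flags & 0x8)			/* G set: limit counted in 4KiB pages */
		limit = (limit << 12) | 0xfff;

	printf("%016llx: base=%#010x limit=%#010x dpl=%u %s %s\n",
	       (unsigned long long)d, base, limit, (access >> 5) & 3,
	       width, (access & 0x08) ? "code" : "data");
}

int main(void)
{
	decode_gdt_entry(0x00cf9b000000ffffULL);	/* __KERNEL32_CS */
	decode_gdt_entry(0x00af9b000000ffffULL);	/* __KERNEL_CS   */
	decode_gdt_entry(0x00cff3000000ffffULL);	/* __USER_DS     */
	return 0;
}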
14406 diff -urNp linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c
14407 --- linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14408 +++ linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14409 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14410 EXPORT_SYMBOL(cmpxchg8b_emu);
14411 #endif
14412
14413 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14414 +
14415 /* Networking helper routines. */
14416 EXPORT_SYMBOL(csum_partial_copy_generic);
14417 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14418 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14419
14420 EXPORT_SYMBOL(__get_user_1);
14421 EXPORT_SYMBOL(__get_user_2);
14422 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14423
14424 EXPORT_SYMBOL(csum_partial);
14425 EXPORT_SYMBOL(empty_zero_page);
14426 +
14427 +#ifdef CONFIG_PAX_KERNEXEC
14428 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14429 +#endif
14430 diff -urNp linux-2.6.32.41/arch/x86/kernel/i8259.c linux-2.6.32.41/arch/x86/kernel/i8259.c
14431 --- linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14432 +++ linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14433 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14434 "spurious 8259A interrupt: IRQ%d.\n", irq);
14435 spurious_irq_mask |= irqmask;
14436 }
14437 - atomic_inc(&irq_err_count);
14438 + atomic_inc_unchecked(&irq_err_count);
14439 /*
14440 * Theoretically we do not have to handle this IRQ,
14441 * but in Linux this does not cause problems and is
14442 diff -urNp linux-2.6.32.41/arch/x86/kernel/init_task.c linux-2.6.32.41/arch/x86/kernel/init_task.c
14443 --- linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14444 +++ linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14445 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14446 * way process stacks are handled. This is done by having a special
14447 * "init_task" linker map entry..
14448 */
14449 -union thread_union init_thread_union __init_task_data =
14450 - { INIT_THREAD_INFO(init_task) };
14451 +union thread_union init_thread_union __init_task_data;
14452
14453 /*
14454 * Initial task structure.
14455 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14456 * section. Since TSS's are completely CPU-local, we want them
14457 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14458 */
14459 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14460 -
14461 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14462 +EXPORT_SYMBOL(init_tss);
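/*
 * Editor's sketch, not part of the patch: with init_tss turned into a plain
 * NR_CPUS-sized array above, "&per_cpu(init_tss, cpu)" becomes ordinary
 * pointer arithmetic, which is what the "init_tss + get_cpu()" hunks in
 * ioport.c and the context-switch code below rely on.  NR_CPUS and the
 * struct contents here are placeholders for illustration only.
 */
#include <stdio.h>
#include <assert.h>

#define NR_CPUS 4

struct tss_struct { unsigned long sp0; /* ... */ };

static struct tss_struct init_tss[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct tss_struct *tss = init_tss + cpu;	/* as in the hunks below */
		assert(tss == &init_tss[cpu]);			/* identical addresses   */
		tss->sp0 = 0x1000UL * (cpu + 1);
	}
	printf("cpu1 sp0 = %#lx\n", init_tss[1].sp0);
	return 0;
}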
14463 diff -urNp linux-2.6.32.41/arch/x86/kernel/ioport.c linux-2.6.32.41/arch/x86/kernel/ioport.c
14464 --- linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14465 +++ linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14466 @@ -6,6 +6,7 @@
14467 #include <linux/sched.h>
14468 #include <linux/kernel.h>
14469 #include <linux/capability.h>
14470 +#include <linux/security.h>
14471 #include <linux/errno.h>
14472 #include <linux/types.h>
14473 #include <linux/ioport.h>
14474 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14475
14476 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14477 return -EINVAL;
14478 +#ifdef CONFIG_GRKERNSEC_IO
14479 + if (turn_on && grsec_disable_privio) {
14480 + gr_handle_ioperm();
14481 + return -EPERM;
14482 + }
14483 +#endif
14484 if (turn_on && !capable(CAP_SYS_RAWIO))
14485 return -EPERM;
14486
14487 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14488 * because the ->io_bitmap_max value must match the bitmap
14489 * contents:
14490 */
14491 - tss = &per_cpu(init_tss, get_cpu());
14492 + tss = init_tss + get_cpu();
14493
14494 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14495
14496 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14497 return -EINVAL;
14498 /* Trying to gain more privileges? */
14499 if (level > old) {
14500 +#ifdef CONFIG_GRKERNSEC_IO
14501 + if (grsec_disable_privio) {
14502 + gr_handle_iopl();
14503 + return -EPERM;
14504 + }
14505 +#endif
14506 if (!capable(CAP_SYS_RAWIO))
14507 return -EPERM;
14508 }
14509 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq_32.c linux-2.6.32.41/arch/x86/kernel/irq_32.c
14510 --- linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14511 +++ linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14512 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14513 __asm__ __volatile__("andl %%esp,%0" :
14514 "=r" (sp) : "0" (THREAD_SIZE - 1));
14515
14516 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14517 + return sp < STACK_WARN;
14518 }
14519
14520 static void print_stack_overflow(void)
14521 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14522 * per-CPU IRQ handling contexts (thread information and stack)
14523 */
14524 union irq_ctx {
14525 - struct thread_info tinfo;
14526 - u32 stack[THREAD_SIZE/sizeof(u32)];
14527 -} __attribute__((aligned(PAGE_SIZE)));
14528 + unsigned long previous_esp;
14529 + u32 stack[THREAD_SIZE/sizeof(u32)];
14530 +} __attribute__((aligned(THREAD_SIZE)));
14531
14532 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14533 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14534 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14535 static inline int
14536 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14537 {
14538 - union irq_ctx *curctx, *irqctx;
14539 + union irq_ctx *irqctx;
14540 u32 *isp, arg1, arg2;
14541
14542 - curctx = (union irq_ctx *) current_thread_info();
14543 irqctx = __get_cpu_var(hardirq_ctx);
14544
14545 /*
14546 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14547 * handler) we can't do that and just have to keep using the
14548 * current stack (which is the irq stack already after all)
14549 */
14550 - if (unlikely(curctx == irqctx))
14551 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14552 return 0;
14553
14554 /* build the stack frame on the IRQ stack */
14555 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14556 - irqctx->tinfo.task = curctx->tinfo.task;
14557 - irqctx->tinfo.previous_esp = current_stack_pointer;
14558 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14559 + irqctx->previous_esp = current_stack_pointer;
14560 + add_preempt_count(HARDIRQ_OFFSET);
14561
14562 - /*
14563 - * Copy the softirq bits in preempt_count so that the
14564 - * softirq checks work in the hardirq context.
14565 - */
14566 - irqctx->tinfo.preempt_count =
14567 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14568 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14570 + __set_fs(MAKE_MM_SEG(0));
14571 +#endif
14572
14573 if (unlikely(overflow))
14574 call_on_stack(print_stack_overflow, isp);
14575 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14576 : "0" (irq), "1" (desc), "2" (isp),
14577 "D" (desc->handle_irq)
14578 : "memory", "cc", "ecx");
14579 +
14580 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14581 + __set_fs(current_thread_info()->addr_limit);
14582 +#endif
14583 +
14584 + sub_preempt_count(HARDIRQ_OFFSET);
14585 return 1;
14586 }
14587
14588 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14589 */
14590 void __cpuinit irq_ctx_init(int cpu)
14591 {
14592 - union irq_ctx *irqctx;
14593 -
14594 if (per_cpu(hardirq_ctx, cpu))
14595 return;
14596
14597 - irqctx = &per_cpu(hardirq_stack, cpu);
14598 - irqctx->tinfo.task = NULL;
14599 - irqctx->tinfo.exec_domain = NULL;
14600 - irqctx->tinfo.cpu = cpu;
14601 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14602 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14603 -
14604 - per_cpu(hardirq_ctx, cpu) = irqctx;
14605 -
14606 - irqctx = &per_cpu(softirq_stack, cpu);
14607 - irqctx->tinfo.task = NULL;
14608 - irqctx->tinfo.exec_domain = NULL;
14609 - irqctx->tinfo.cpu = cpu;
14610 - irqctx->tinfo.preempt_count = 0;
14611 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14612 -
14613 - per_cpu(softirq_ctx, cpu) = irqctx;
14614 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14615 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14616
14617 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14618 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14619 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14620 asmlinkage void do_softirq(void)
14621 {
14622 unsigned long flags;
14623 - struct thread_info *curctx;
14624 union irq_ctx *irqctx;
14625 u32 *isp;
14626
14627 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14628 local_irq_save(flags);
14629
14630 if (local_softirq_pending()) {
14631 - curctx = current_thread_info();
14632 irqctx = __get_cpu_var(softirq_ctx);
14633 - irqctx->tinfo.task = curctx->task;
14634 - irqctx->tinfo.previous_esp = current_stack_pointer;
14635 + irqctx->previous_esp = current_stack_pointer;
14636
14637 /* build the stack frame on the softirq stack */
14638 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14639 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14640 +
14641 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14642 + __set_fs(MAKE_MM_SEG(0));
14643 +#endif
14644
14645 call_on_stack(__do_softirq, isp);
14646 +
14647 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14648 + __set_fs(current_thread_info()->addr_limit);
14649 +#endif
14650 +
14651 /*
14652 * Shouldnt happen, we returned above if in_interrupt():
14653 */
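/*
 * Editor's sketch, not part of the patch: once the irq_ctx above is both
 * THREAD_SIZE-sized and THREAD_SIZE-aligned, "are we already on this IRQ
 * stack?" reduces to a single unsigned distance check, replacing the old
 * comparison of thread_info pointers.  Addresses below are made up for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* 8KiB stacks, as on i386 */

static int on_irq_stack(uintptr_t sp, uintptr_t irqctx)
{
	/* unsigned wrap-around lets one compare cover both bounds */
	return (sp - irqctx) < THREAD_SIZE;
}

int main(void)
{
	uintptr_t irqctx = 0xc1000000;	/* hypothetical, THREAD_SIZE-aligned base */

	printf("%d\n", on_irq_stack(irqctx + 0x100, irqctx));		/* 1: inside           */
	printf("%d\n", on_irq_stack(irqctx + THREAD_SIZE, irqctx));	/* 0: one past the end */
	printf("%d\n", on_irq_stack(irqctx - 4, irqctx));		/* 0: below the base   */
	return 0;
}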
14654 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq.c linux-2.6.32.41/arch/x86/kernel/irq.c
14655 --- linux-2.6.32.41/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14656 +++ linux-2.6.32.41/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14657 @@ -15,7 +15,7 @@
14658 #include <asm/mce.h>
14659 #include <asm/hw_irq.h>
14660
14661 -atomic_t irq_err_count;
14662 +atomic_unchecked_t irq_err_count;
14663
14664 /* Function pointer for generic interrupt vector handling */
14665 void (*generic_interrupt_extension)(void) = NULL;
14666 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14667 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14668 seq_printf(p, " Machine check polls\n");
14669 #endif
14670 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14671 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14672 #if defined(CONFIG_X86_IO_APIC)
14673 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14674 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14675 #endif
14676 return 0;
14677 }
14678 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14679
14680 u64 arch_irq_stat(void)
14681 {
14682 - u64 sum = atomic_read(&irq_err_count);
14683 + u64 sum = atomic_read_unchecked(&irq_err_count);
14684
14685 #ifdef CONFIG_X86_IO_APIC
14686 - sum += atomic_read(&irq_mis_count);
14687 + sum += atomic_read_unchecked(&irq_mis_count);
14688 #endif
14689 return sum;
14690 }
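/*
 * Editor's sketch, a simplified userspace stand-in rather than the kernel
 * implementation: under the patch's REFCOUNT hardening, ordinary atomic_t
 * increments gain overflow detection, so counters that are pure statistics,
 * like irq_err_count in the i8259.c and irq.c hunks above, are switched to
 * an "unchecked" variant that keeps the plain wrap-on-overflow behaviour.
 * The type and helpers below only sketch that split.
 */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* no saturation or trap on overflow - wraps like a plain int */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static atomic_unchecked_t irq_err_count;

int main(void)
{
	for (int i = 0; i < 5; i++)
		atomic_inc_unchecked(&irq_err_count);
	printf("ERR: %d\n", atomic_read_unchecked(&irq_err_count));
	return 0;
}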
14691 diff -urNp linux-2.6.32.41/arch/x86/kernel/kgdb.c linux-2.6.32.41/arch/x86/kernel/kgdb.c
14692 --- linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14693 +++ linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14694 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14695
14696 /* clear the trace bit */
14697 linux_regs->flags &= ~X86_EFLAGS_TF;
14698 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14699 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14700
14701 /* set the trace bit if we're stepping */
14702 if (remcomInBuffer[0] == 's') {
14703 linux_regs->flags |= X86_EFLAGS_TF;
14704 kgdb_single_step = 1;
14705 - atomic_set(&kgdb_cpu_doing_single_step,
14706 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14707 raw_smp_processor_id());
14708 }
14709
14710 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14711 break;
14712
14713 case DIE_DEBUG:
14714 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
14715 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14716 raw_smp_processor_id()) {
14717 if (user_mode(regs))
14718 return single_step_cont(regs, args);
14719 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14720 return instruction_pointer(regs);
14721 }
14722
14723 -struct kgdb_arch arch_kgdb_ops = {
14724 +const struct kgdb_arch arch_kgdb_ops = {
14725 /* Breakpoint instruction: */
14726 .gdb_bpt_instr = { 0xcc },
14727 .flags = KGDB_HW_BREAKPOINT,
14728 diff -urNp linux-2.6.32.41/arch/x86/kernel/kprobes.c linux-2.6.32.41/arch/x86/kernel/kprobes.c
14729 --- linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14730 +++ linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14731 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14732 char op;
14733 s32 raddr;
14734 } __attribute__((packed)) * jop;
14735 - jop = (struct __arch_jmp_op *)from;
14736 +
14737 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14738 +
14739 + pax_open_kernel();
14740 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14741 jop->op = RELATIVEJUMP_INSTRUCTION;
14742 + pax_close_kernel();
14743 }
14744
14745 /*
14746 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14747 kprobe_opcode_t opcode;
14748 kprobe_opcode_t *orig_opcodes = opcodes;
14749
14750 - if (search_exception_tables((unsigned long)opcodes))
14751 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14752 return 0; /* Page fault may occur on this address. */
14753
14754 retry:
14755 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14756 disp = (u8 *) p->addr + *((s32 *) insn) -
14757 (u8 *) p->ainsn.insn;
14758 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14759 + pax_open_kernel();
14760 *(s32 *)insn = (s32) disp;
14761 + pax_close_kernel();
14762 }
14763 }
14764 #endif
14765 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14766
14767 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14768 {
14769 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14770 + pax_open_kernel();
14771 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14772 + pax_close_kernel();
14773
14774 fix_riprel(p);
14775
14776 - if (can_boost(p->addr))
14777 + if (can_boost(ktla_ktva(p->addr)))
14778 p->ainsn.boostable = 0;
14779 else
14780 p->ainsn.boostable = -1;
14781
14782 - p->opcode = *p->addr;
14783 + p->opcode = *(ktla_ktva(p->addr));
14784 }
14785
14786 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14787 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14788 if (p->opcode == BREAKPOINT_INSTRUCTION)
14789 regs->ip = (unsigned long)p->addr;
14790 else
14791 - regs->ip = (unsigned long)p->ainsn.insn;
14792 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14793 }
14794
14795 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14796 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14797 if (p->ainsn.boostable == 1 && !p->post_handler) {
14798 /* Boost up -- we can execute copied instructions directly */
14799 reset_current_kprobe();
14800 - regs->ip = (unsigned long)p->ainsn.insn;
14801 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14802 preempt_enable_no_resched();
14803 return;
14804 }
14805 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14806 struct kprobe_ctlblk *kcb;
14807
14808 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14809 - if (*addr != BREAKPOINT_INSTRUCTION) {
14810 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14811 /*
14812 * The breakpoint instruction was removed right
14813 * after we hit it. Another cpu has removed
14814 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14815 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14816 {
14817 unsigned long *tos = stack_addr(regs);
14818 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14819 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14820 unsigned long orig_ip = (unsigned long)p->addr;
14821 kprobe_opcode_t *insn = p->ainsn.insn;
14822
14823 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14824 struct die_args *args = data;
14825 int ret = NOTIFY_DONE;
14826
14827 - if (args->regs && user_mode_vm(args->regs))
14828 + if (args->regs && user_mode(args->regs))
14829 return ret;
14830
14831 switch (val) {
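/*
 * Editor's sketch, a userspace analogue rather than the kernel mechanism:
 * the pax_open_kernel()/pax_close_kernel() pairs added to the kprobes.c
 * hunks above briefly permit writes to otherwise read-only kernel text
 * around a small instruction patch, much like flipping page protections
 * with mprotect() around a one-byte store and restoring them right after.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	unsigned char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	memset(page, 0x90, pagesz);			/* pretend this is code (NOPs) */
	mprotect(page, pagesz, PROT_READ);		/* now "read-only text"        */

	mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* like pax_open_kernel()  */
	page[0] = 0xcc;					/* plant a breakpoint byte */
	mprotect(page, pagesz, PROT_READ);		/* like pax_close_kernel() */

	printf("first byte is now %#x\n", page[0]);
	munmap(page, pagesz);
	return 0;
}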
14832 diff -urNp linux-2.6.32.41/arch/x86/kernel/ldt.c linux-2.6.32.41/arch/x86/kernel/ldt.c
14833 --- linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14834 +++ linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14835 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14836 if (reload) {
14837 #ifdef CONFIG_SMP
14838 preempt_disable();
14839 - load_LDT(pc);
14840 + load_LDT_nolock(pc);
14841 if (!cpumask_equal(mm_cpumask(current->mm),
14842 cpumask_of(smp_processor_id())))
14843 smp_call_function(flush_ldt, current->mm, 1);
14844 preempt_enable();
14845 #else
14846 - load_LDT(pc);
14847 + load_LDT_nolock(pc);
14848 #endif
14849 }
14850 if (oldsize) {
14851 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14852 return err;
14853
14854 for (i = 0; i < old->size; i++)
14855 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14856 + write_ldt_entry(new->ldt, i, old->ldt + i);
14857 return 0;
14858 }
14859
14860 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14861 retval = copy_ldt(&mm->context, &old_mm->context);
14862 mutex_unlock(&old_mm->context.lock);
14863 }
14864 +
14865 + if (tsk == current) {
14866 + mm->context.vdso = 0;
14867 +
14868 +#ifdef CONFIG_X86_32
14869 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14870 + mm->context.user_cs_base = 0UL;
14871 + mm->context.user_cs_limit = ~0UL;
14872 +
14873 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14874 + cpus_clear(mm->context.cpu_user_cs_mask);
14875 +#endif
14876 +
14877 +#endif
14878 +#endif
14879 +
14880 + }
14881 +
14882 return retval;
14883 }
14884
14885 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
14886 }
14887 }
14888
14889 +#ifdef CONFIG_PAX_SEGMEXEC
14890 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14891 + error = -EINVAL;
14892 + goto out_unlock;
14893 + }
14894 +#endif
14895 +
14896 fill_ldt(&ldt, &ldt_info);
14897 if (oldmode)
14898 ldt.avl = 0;
14899 diff -urNp linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c
14900 --- linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
14901 +++ linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
14902 @@ -26,7 +26,7 @@
14903 #include <asm/system.h>
14904 #include <asm/cacheflush.h>
14905
14906 -static void set_idt(void *newidt, __u16 limit)
14907 +static void set_idt(struct desc_struct *newidt, __u16 limit)
14908 {
14909 struct desc_ptr curidt;
14910
14911 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
14912 }
14913
14914
14915 -static void set_gdt(void *newgdt, __u16 limit)
14916 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14917 {
14918 struct desc_ptr curgdt;
14919
14920 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14921 }
14922
14923 control_page = page_address(image->control_code_page);
14924 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14925 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14926
14927 relocate_kernel_ptr = control_page;
14928 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14929 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_amd.c linux-2.6.32.41/arch/x86/kernel/microcode_amd.c
14930 --- linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
14931 +++ linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
14932 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
14933 uci->mc = NULL;
14934 }
14935
14936 -static struct microcode_ops microcode_amd_ops = {
14937 +static const struct microcode_ops microcode_amd_ops = {
14938 .request_microcode_user = request_microcode_user,
14939 .request_microcode_fw = request_microcode_fw,
14940 .collect_cpu_info = collect_cpu_info_amd,
14941 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
14942 .microcode_fini_cpu = microcode_fini_cpu_amd,
14943 };
14944
14945 -struct microcode_ops * __init init_amd_microcode(void)
14946 +const struct microcode_ops * __init init_amd_microcode(void)
14947 {
14948 return &microcode_amd_ops;
14949 }
14950 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_core.c linux-2.6.32.41/arch/x86/kernel/microcode_core.c
14951 --- linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
14952 +++ linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
14953 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
14954
14955 #define MICROCODE_VERSION "2.00"
14956
14957 -static struct microcode_ops *microcode_ops;
14958 +static const struct microcode_ops *microcode_ops;
14959
14960 /*
14961 * Synchronization.
14962 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_intel.c linux-2.6.32.41/arch/x86/kernel/microcode_intel.c
14963 --- linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
14964 +++ linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
14965 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
14966
14967 static int get_ucode_user(void *to, const void *from, size_t n)
14968 {
14969 - return copy_from_user(to, from, n);
14970 + return copy_from_user(to, (__force const void __user *)from, n);
14971 }
14972
14973 static enum ucode_state
14974 request_microcode_user(int cpu, const void __user *buf, size_t size)
14975 {
14976 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14977 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
14978 }
14979
14980 static void microcode_fini_cpu(int cpu)
14981 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
14982 uci->mc = NULL;
14983 }
14984
14985 -static struct microcode_ops microcode_intel_ops = {
14986 +static const struct microcode_ops microcode_intel_ops = {
14987 .request_microcode_user = request_microcode_user,
14988 .request_microcode_fw = request_microcode_fw,
14989 .collect_cpu_info = collect_cpu_info,
14990 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
14991 .microcode_fini_cpu = microcode_fini_cpu,
14992 };
14993
14994 -struct microcode_ops * __init init_intel_microcode(void)
14995 +const struct microcode_ops * __init init_intel_microcode(void)
14996 {
14997 return &microcode_intel_ops;
14998 }
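/*
 * Editor's sketch, not part of the patch: the constification pattern applied
 * above to microcode_ops (and to kgdb_arch, dma_map_ops and friends later in
 * this patch).  An ops table declared const is emitted into a read-only
 * section, so a stray runtime write to one of its function pointers faults
 * instead of silently redirecting control flow.  The structure below is a
 * made-up example, not a kernel type.
 */
#include <stdio.h>

struct example_ops {
	int (*collect_info)(int cpu);
};

static int collect_info_impl(int cpu) { return cpu * 2; }

/* "static const" => placed in .rodata, like the constified ops above */
static const struct example_ops example_ops = {
	.collect_info = collect_info_impl,
};

int main(void)
{
	const struct example_ops *ops = &example_ops;
	printf("%d\n", ops->collect_info(21));
	/* ops->collect_info = NULL;  -- rejected at compile time */
	return 0;
}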
14999 diff -urNp linux-2.6.32.41/arch/x86/kernel/module.c linux-2.6.32.41/arch/x86/kernel/module.c
15000 --- linux-2.6.32.41/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15001 +++ linux-2.6.32.41/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15002 @@ -34,7 +34,7 @@
15003 #define DEBUGP(fmt...)
15004 #endif
15005
15006 -void *module_alloc(unsigned long size)
15007 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15008 {
15009 struct vm_struct *area;
15010
15011 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15012 if (!area)
15013 return NULL;
15014
15015 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15016 - PAGE_KERNEL_EXEC);
15017 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15018 +}
15019 +
15020 +void *module_alloc(unsigned long size)
15021 +{
15022 +
15023 +#ifdef CONFIG_PAX_KERNEXEC
15024 + return __module_alloc(size, PAGE_KERNEL);
15025 +#else
15026 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15027 +#endif
15028 +
15029 }
15030
15031 /* Free memory returned from module_alloc */
15032 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15033 vfree(module_region);
15034 }
15035
15036 +#ifdef CONFIG_PAX_KERNEXEC
15037 +#ifdef CONFIG_X86_32
15038 +void *module_alloc_exec(unsigned long size)
15039 +{
15040 + struct vm_struct *area;
15041 +
15042 + if (size == 0)
15043 + return NULL;
15044 +
15045 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15046 + return area ? area->addr : NULL;
15047 +}
15048 +EXPORT_SYMBOL(module_alloc_exec);
15049 +
15050 +void module_free_exec(struct module *mod, void *module_region)
15051 +{
15052 + vunmap(module_region);
15053 +}
15054 +EXPORT_SYMBOL(module_free_exec);
15055 +#else
15056 +void module_free_exec(struct module *mod, void *module_region)
15057 +{
15058 + module_free(mod, module_region);
15059 +}
15060 +EXPORT_SYMBOL(module_free_exec);
15061 +
15062 +void *module_alloc_exec(unsigned long size)
15063 +{
15064 + return __module_alloc(size, PAGE_KERNEL_RX);
15065 +}
15066 +EXPORT_SYMBOL(module_alloc_exec);
15067 +#endif
15068 +#endif
15069 +
15070 /* We don't need anything special. */
15071 int module_frob_arch_sections(Elf_Ehdr *hdr,
15072 Elf_Shdr *sechdrs,
15073 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15074 unsigned int i;
15075 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15076 Elf32_Sym *sym;
15077 - uint32_t *location;
15078 + uint32_t *plocation, location;
15079
15080 DEBUGP("Applying relocate section %u to %u\n", relsec,
15081 sechdrs[relsec].sh_info);
15082 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15083 /* This is where to make the change */
15084 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15085 - + rel[i].r_offset;
15086 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15087 + location = (uint32_t)plocation;
15088 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15089 + plocation = ktla_ktva((void *)plocation);
15090 /* This is the symbol it is referring to. Note that all
15091 undefined symbols have been resolved. */
15092 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15093 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15094 switch (ELF32_R_TYPE(rel[i].r_info)) {
15095 case R_386_32:
15096 /* We add the value into the location given */
15097 - *location += sym->st_value;
15098 + pax_open_kernel();
15099 + *plocation += sym->st_value;
15100 + pax_close_kernel();
15101 break;
15102 case R_386_PC32:
15103 /* Add the value, subtract its postition */
15104 - *location += sym->st_value - (uint32_t)location;
15105 + pax_open_kernel();
15106 + *plocation += sym->st_value - location;
15107 + pax_close_kernel();
15108 break;
15109 default:
15110 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15111 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15112 case R_X86_64_NONE:
15113 break;
15114 case R_X86_64_64:
15115 + pax_open_kernel();
15116 *(u64 *)loc = val;
15117 + pax_close_kernel();
15118 break;
15119 case R_X86_64_32:
15120 + pax_open_kernel();
15121 *(u32 *)loc = val;
15122 + pax_close_kernel();
15123 if (val != *(u32 *)loc)
15124 goto overflow;
15125 break;
15126 case R_X86_64_32S:
15127 + pax_open_kernel();
15128 *(s32 *)loc = val;
15129 + pax_close_kernel();
15130 if ((s64)val != *(s32 *)loc)
15131 goto overflow;
15132 break;
15133 case R_X86_64_PC32:
15134 val -= (u64)loc;
15135 + pax_open_kernel();
15136 *(u32 *)loc = val;
15137 + pax_close_kernel();
15138 +
15139 #if 0
15140 if ((s64)val != *(s32 *)loc)
15141 goto overflow;
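/*
 * Editor's sketch, not part of the patch: the two i386 relocation types
 * handled in the apply_relocate() hunk above.  R_386_32 adds the symbol
 * value into the location (A + S); R_386_PC32 also subtracts the address
 * of the location itself (A + S - P), which is why the hunk keeps the
 * numeric address ("location") for the arithmetic even though the store
 * goes through the writable alias ("plocation").  Addresses are invented.
 */
#include <stdio.h>
#include <stdint.h>

static void apply_r_386_32(uint32_t *loc, uint32_t sym_value)
{
	*loc += sym_value;			/* A + S */
}

static void apply_r_386_pc32(uint32_t *loc, uint32_t sym_value,
			     uint32_t loc_address)
{
	*loc += sym_value - loc_address;	/* A + S - P */
}

int main(void)
{
	uint32_t word = 0;			/* addend of 0 stored in the code */

	apply_r_386_32(&word, 0xc0123456);
	printf("R_386_32  : %#x\n", word);

	word = 0xfffffffc;			/* typical call rel32 addend (-4) */
	apply_r_386_pc32(&word, 0xc0123456, 0xc0001000);
	printf("R_386_PC32: %#x\n", word);
	return 0;
}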
15142 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt.c linux-2.6.32.41/arch/x86/kernel/paravirt.c
15143 --- linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15144 +++ linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15145 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15146 * corresponding structure. */
15147 static void *get_call_destination(u8 type)
15148 {
15149 - struct paravirt_patch_template tmpl = {
15150 + const struct paravirt_patch_template tmpl = {
15151 .pv_init_ops = pv_init_ops,
15152 .pv_time_ops = pv_time_ops,
15153 .pv_cpu_ops = pv_cpu_ops,
15154 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15155 .pv_lock_ops = pv_lock_ops,
15156 #endif
15157 };
15158 +
15159 + pax_track_stack();
15160 +
15161 return *((void **)&tmpl + type);
15162 }
15163
15164 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15165 if (opfunc == NULL)
15166 /* If there's no function, patch it with a ud2a (BUG) */
15167 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15168 - else if (opfunc == _paravirt_nop)
15169 + else if (opfunc == (void *)_paravirt_nop)
15170 /* If the operation is a nop, then nop the callsite */
15171 ret = paravirt_patch_nop();
15172
15173 /* identity functions just return their single argument */
15174 - else if (opfunc == _paravirt_ident_32)
15175 + else if (opfunc == (void *)_paravirt_ident_32)
15176 ret = paravirt_patch_ident_32(insnbuf, len);
15177 - else if (opfunc == _paravirt_ident_64)
15178 + else if (opfunc == (void *)_paravirt_ident_64)
15179 ret = paravirt_patch_ident_64(insnbuf, len);
15180
15181 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15182 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15183 if (insn_len > len || start == NULL)
15184 insn_len = len;
15185 else
15186 - memcpy(insnbuf, start, insn_len);
15187 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15188
15189 return insn_len;
15190 }
15191 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15192 preempt_enable();
15193 }
15194
15195 -struct pv_info pv_info = {
15196 +struct pv_info pv_info __read_only = {
15197 .name = "bare hardware",
15198 .paravirt_enabled = 0,
15199 .kernel_rpl = 0,
15200 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15201 };
15202
15203 -struct pv_init_ops pv_init_ops = {
15204 +struct pv_init_ops pv_init_ops __read_only = {
15205 .patch = native_patch,
15206 };
15207
15208 -struct pv_time_ops pv_time_ops = {
15209 +struct pv_time_ops pv_time_ops __read_only = {
15210 .sched_clock = native_sched_clock,
15211 };
15212
15213 -struct pv_irq_ops pv_irq_ops = {
15214 +struct pv_irq_ops pv_irq_ops __read_only = {
15215 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15216 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15217 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15218 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15219 #endif
15220 };
15221
15222 -struct pv_cpu_ops pv_cpu_ops = {
15223 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15224 .cpuid = native_cpuid,
15225 .get_debugreg = native_get_debugreg,
15226 .set_debugreg = native_set_debugreg,
15227 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15228 .end_context_switch = paravirt_nop,
15229 };
15230
15231 -struct pv_apic_ops pv_apic_ops = {
15232 +struct pv_apic_ops pv_apic_ops __read_only = {
15233 #ifdef CONFIG_X86_LOCAL_APIC
15234 .startup_ipi_hook = paravirt_nop,
15235 #endif
15236 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15237 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15238 #endif
15239
15240 -struct pv_mmu_ops pv_mmu_ops = {
15241 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15242
15243 .read_cr2 = native_read_cr2,
15244 .write_cr2 = native_write_cr2,
15245 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15246 },
15247
15248 .set_fixmap = native_set_fixmap,
15249 +
15250 +#ifdef CONFIG_PAX_KERNEXEC
15251 + .pax_open_kernel = native_pax_open_kernel,
15252 + .pax_close_kernel = native_pax_close_kernel,
15253 +#endif
15254 +
15255 };
15256
15257 EXPORT_SYMBOL_GPL(pv_time_ops);
15258 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c
15259 --- linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15260 +++ linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15261 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15262 __raw_spin_lock(lock);
15263 }
15264
15265 -struct pv_lock_ops pv_lock_ops = {
15266 +struct pv_lock_ops pv_lock_ops __read_only = {
15267 #ifdef CONFIG_SMP
15268 .spin_is_locked = __ticket_spin_is_locked,
15269 .spin_is_contended = __ticket_spin_is_contended,
15270 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c
15271 --- linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15272 +++ linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15273 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15274 free_pages((unsigned long)vaddr, get_order(size));
15275 }
15276
15277 -static struct dma_map_ops calgary_dma_ops = {
15278 +static const struct dma_map_ops calgary_dma_ops = {
15279 .alloc_coherent = calgary_alloc_coherent,
15280 .free_coherent = calgary_free_coherent,
15281 .map_sg = calgary_map_sg,
15282 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-dma.c linux-2.6.32.41/arch/x86/kernel/pci-dma.c
15283 --- linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15284 +++ linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15285 @@ -14,7 +14,7 @@
15286
15287 static int forbid_dac __read_mostly;
15288
15289 -struct dma_map_ops *dma_ops;
15290 +const struct dma_map_ops *dma_ops;
15291 EXPORT_SYMBOL(dma_ops);
15292
15293 static int iommu_sac_force __read_mostly;
15294 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15295
15296 int dma_supported(struct device *dev, u64 mask)
15297 {
15298 - struct dma_map_ops *ops = get_dma_ops(dev);
15299 + const struct dma_map_ops *ops = get_dma_ops(dev);
15300
15301 #ifdef CONFIG_PCI
15302 if (mask > 0xffffffff && forbid_dac > 0) {
15303 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c
15304 --- linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15305 +++ linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15306 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15307 return -1;
15308 }
15309
15310 -static struct dma_map_ops gart_dma_ops = {
15311 +static const struct dma_map_ops gart_dma_ops = {
15312 .map_sg = gart_map_sg,
15313 .unmap_sg = gart_unmap_sg,
15314 .map_page = gart_map_page,
15315 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-nommu.c linux-2.6.32.41/arch/x86/kernel/pci-nommu.c
15316 --- linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15317 +++ linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15318 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15319 flush_write_buffers();
15320 }
15321
15322 -struct dma_map_ops nommu_dma_ops = {
15323 +const struct dma_map_ops nommu_dma_ops = {
15324 .alloc_coherent = dma_generic_alloc_coherent,
15325 .free_coherent = nommu_free_coherent,
15326 .map_sg = nommu_map_sg,
15327 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c
15328 --- linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15329 +++ linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15330 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15331 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15332 }
15333
15334 -static struct dma_map_ops swiotlb_dma_ops = {
15335 +static const struct dma_map_ops swiotlb_dma_ops = {
15336 .mapping_error = swiotlb_dma_mapping_error,
15337 .alloc_coherent = x86_swiotlb_alloc_coherent,
15338 .free_coherent = swiotlb_free_coherent,
15339 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_32.c linux-2.6.32.41/arch/x86/kernel/process_32.c
15340 --- linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
15341 +++ linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-05-16 21:46:57.000000000 -0400
15342 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15343 unsigned long thread_saved_pc(struct task_struct *tsk)
15344 {
15345 return ((unsigned long *)tsk->thread.sp)[3];
15346 +//XXX return tsk->thread.eip;
15347 }
15348
15349 #ifndef CONFIG_SMP
15350 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15351 unsigned short ss, gs;
15352 const char *board;
15353
15354 - if (user_mode_vm(regs)) {
15355 + if (user_mode(regs)) {
15356 sp = regs->sp;
15357 ss = regs->ss & 0xffff;
15358 - gs = get_user_gs(regs);
15359 } else {
15360 sp = (unsigned long) (&regs->sp);
15361 savesegment(ss, ss);
15362 - savesegment(gs, gs);
15363 }
15364 + gs = get_user_gs(regs);
15365
15366 printk("\n");
15367
15368 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15369 regs.bx = (unsigned long) fn;
15370 regs.dx = (unsigned long) arg;
15371
15372 - regs.ds = __USER_DS;
15373 - regs.es = __USER_DS;
15374 + regs.ds = __KERNEL_DS;
15375 + regs.es = __KERNEL_DS;
15376 regs.fs = __KERNEL_PERCPU;
15377 - regs.gs = __KERNEL_STACK_CANARY;
15378 + savesegment(gs, regs.gs);
15379 regs.orig_ax = -1;
15380 regs.ip = (unsigned long) kernel_thread_helper;
15381 regs.cs = __KERNEL_CS | get_kernel_rpl();
15382 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15383 struct task_struct *tsk;
15384 int err;
15385
15386 - childregs = task_pt_regs(p);
15387 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15388 *childregs = *regs;
15389 childregs->ax = 0;
15390 childregs->sp = sp;
15391
15392 p->thread.sp = (unsigned long) childregs;
15393 p->thread.sp0 = (unsigned long) (childregs+1);
15394 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15395
15396 p->thread.ip = (unsigned long) ret_from_fork;
15397
15398 @@ -346,7 +347,7 @@ __switch_to(struct task_struct *prev_p,
15399 struct thread_struct *prev = &prev_p->thread,
15400 *next = &next_p->thread;
15401 int cpu = smp_processor_id();
15402 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15403 + struct tss_struct *tss = init_tss + cpu;
15404 bool preload_fpu;
15405
15406 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15407 @@ -381,6 +382,10 @@ __switch_to(struct task_struct *prev_p,
15408 */
15409 lazy_save_gs(prev->gs);
15410
15411 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15412 + __set_fs(task_thread_info(next_p)->addr_limit);
15413 +#endif
15414 +
15415 /*
15416 * Load the per-thread Thread-Local Storage descriptor.
15417 */
15418 @@ -416,6 +421,9 @@ __switch_to(struct task_struct *prev_p,
15419 */
15420 arch_end_context_switch(next_p);
15421
15422 + percpu_write(current_task, next_p);
15423 + percpu_write(current_tinfo, &next_p->tinfo);
15424 +
15425 if (preload_fpu)
15426 __math_state_restore();
15427
15428 @@ -425,8 +433,6 @@ __switch_to(struct task_struct *prev_p,
15429 if (prev->gs | next->gs)
15430 lazy_load_gs(next->gs);
15431
15432 - percpu_write(current_task, next_p);
15433 -
15434 return prev_p;
15435 }
15436
15437 @@ -496,4 +502,3 @@ unsigned long get_wchan(struct task_stru
15438 } while (count++ < 16);
15439 return 0;
15440 }
15441 -
15442 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_64.c linux-2.6.32.41/arch/x86/kernel/process_64.c
15443 --- linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
15444 +++ linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-05-16 21:46:57.000000000 -0400
15445 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15446 void exit_idle(void)
15447 {
15448 /* idle loop has pid 0 */
15449 - if (current->pid)
15450 + if (task_pid_nr(current))
15451 return;
15452 __exit_idle();
15453 }
15454 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15455 if (!board)
15456 board = "";
15457 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15458 - current->pid, current->comm, print_tainted(),
15459 + task_pid_nr(current), current->comm, print_tainted(),
15460 init_utsname()->release,
15461 (int)strcspn(init_utsname()->version, " "),
15462 init_utsname()->version, board);
15463 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15464 struct pt_regs *childregs;
15465 struct task_struct *me = current;
15466
15467 - childregs = ((struct pt_regs *)
15468 - (THREAD_SIZE + task_stack_page(p))) - 1;
15469 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15470 *childregs = *regs;
15471
15472 childregs->ax = 0;
15473 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15474 p->thread.sp = (unsigned long) childregs;
15475 p->thread.sp0 = (unsigned long) (childregs+1);
15476 p->thread.usersp = me->thread.usersp;
15477 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15478
15479 set_tsk_thread_flag(p, TIF_FORK);
15480
15481 @@ -380,7 +380,7 @@ __switch_to(struct task_struct *prev_p,
15482 struct thread_struct *prev = &prev_p->thread;
15483 struct thread_struct *next = &next_p->thread;
15484 int cpu = smp_processor_id();
15485 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15486 + struct tss_struct *tss = init_tss + cpu;
15487 unsigned fsindex, gsindex;
15488 bool preload_fpu;
15489
15490 @@ -476,10 +476,9 @@ __switch_to(struct task_struct *prev_p,
15491 prev->usersp = percpu_read(old_rsp);
15492 percpu_write(old_rsp, next->usersp);
15493 percpu_write(current_task, next_p);
15494 + percpu_write(current_tinfo, &next_p->tinfo);
15495
15496 - percpu_write(kernel_stack,
15497 - (unsigned long)task_stack_page(next_p) +
15498 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15499 + percpu_write(kernel_stack, next->sp0);
15500
15501 /*
15502 * Now maybe reload the debug registers and handle I/O bitmaps
15503 @@ -560,12 +559,11 @@ unsigned long get_wchan(struct task_stru
15504 if (!p || p == current || p->state == TASK_RUNNING)
15505 return 0;
15506 stack = (unsigned long)task_stack_page(p);
15507 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15508 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15509 return 0;
15510 fp = *(u64 *)(p->thread.sp);
15511 do {
15512 - if (fp < (unsigned long)stack ||
15513 - fp >= (unsigned long)stack+THREAD_SIZE)
15514 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15515 return 0;
15516 ip = *(u64 *)(fp+8);
15517 if (!in_sched_functions(ip))
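/*
 * Editor's sketch, not part of the patch: a bounds-checked frame-pointer
 * walk in the spirit of the tightened get_wchan() checks above.  Each step
 * reads 8 bytes at fp and 8 more at fp+8, so the walk only accepts frame
 * pointers that leave room for both loads inside the stack area.  The
 * stack layout, sizes and addresses below are illustrative, not the
 * kernel's exact reservation.
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL

static uint64_t stack_area[THREAD_SIZE / sizeof(uint64_t)];

static int fp_in_bounds(uintptr_t fp, uintptr_t stack)
{
	return fp >= stack && fp <= stack + THREAD_SIZE - 2 * sizeof(uint64_t);
}

int main(void)
{
	uintptr_t stack = (uintptr_t)stack_area;

	/* fabricate two frames laid out as [saved fp][return ip] */
	uint64_t *frame1 = (uint64_t *)(stack + 0x200);
	uint64_t *frame0 = (uint64_t *)(stack + 0x100);
	frame0[0] = (uint64_t)(uintptr_t)frame1;	/* saved frame pointer */
	frame0[1] = 0xffffffff81000010ULL;		/* fake return address */
	frame1[0] = 0;					/* terminates the walk */
	frame1[1] = 0xffffffff81000020ULL;

	uintptr_t fp = (uintptr_t)frame0;
	while (fp_in_bounds(fp, stack)) {
		uint64_t next = *(uint64_t *)fp;
		uint64_t ip   = *(uint64_t *)(fp + 8);
		printf("ip = %#llx\n", (unsigned long long)ip);
		if (!next)
			break;
		fp = (uintptr_t)next;
	}
	return 0;
}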
15518 diff -urNp linux-2.6.32.41/arch/x86/kernel/process.c linux-2.6.32.41/arch/x86/kernel/process.c
15519 --- linux-2.6.32.41/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15520 +++ linux-2.6.32.41/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15521 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15522
15523 void free_thread_info(struct thread_info *ti)
15524 {
15525 - free_thread_xstate(ti->task);
15526 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15527 }
15528
15529 +static struct kmem_cache *task_struct_cachep;
15530 +
15531 void arch_task_cache_init(void)
15532 {
15533 - task_xstate_cachep =
15534 - kmem_cache_create("task_xstate", xstate_size,
15535 + /* create a slab on which task_structs can be allocated */
15536 + task_struct_cachep =
15537 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15538 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15539 +
15540 + task_xstate_cachep =
15541 + kmem_cache_create("task_xstate", xstate_size,
15542 __alignof__(union thread_xstate),
15543 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15544 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15545 +}
15546 +
15547 +struct task_struct *alloc_task_struct(void)
15548 +{
15549 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15550 +}
15551 +
15552 +void free_task_struct(struct task_struct *task)
15553 +{
15554 + free_thread_xstate(task);
15555 + kmem_cache_free(task_struct_cachep, task);
15556 }
15557
15558 /*
15559 @@ -73,7 +90,7 @@ void exit_thread(void)
15560 unsigned long *bp = t->io_bitmap_ptr;
15561
15562 if (bp) {
15563 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15564 + struct tss_struct *tss = init_tss + get_cpu();
15565
15566 t->io_bitmap_ptr = NULL;
15567 clear_thread_flag(TIF_IO_BITMAP);
15568 @@ -93,6 +110,9 @@ void flush_thread(void)
15569
15570 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15571
15572 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15573 + loadsegment(gs, 0);
15574 +#endif
15575 tsk->thread.debugreg0 = 0;
15576 tsk->thread.debugreg1 = 0;
15577 tsk->thread.debugreg2 = 0;
15578 @@ -307,7 +327,7 @@ void default_idle(void)
15579 EXPORT_SYMBOL(default_idle);
15580 #endif
15581
15582 -void stop_this_cpu(void *dummy)
15583 +__noreturn void stop_this_cpu(void *dummy)
15584 {
15585 local_irq_disable();
15586 /*
15587 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15588 }
15589 early_param("idle", idle_setup);
15590
15591 -unsigned long arch_align_stack(unsigned long sp)
15592 +#ifdef CONFIG_PAX_RANDKSTACK
15593 +asmlinkage void pax_randomize_kstack(void)
15594 {
15595 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15596 - sp -= get_random_int() % 8192;
15597 - return sp & ~0xf;
15598 -}
15599 + struct thread_struct *thread = &current->thread;
15600 + unsigned long time;
15601
15602 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15603 -{
15604 - unsigned long range_end = mm->brk + 0x02000000;
15605 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15606 + if (!randomize_va_space)
15607 + return;
15608 +
15609 + rdtscl(time);
15610 +
15611 + /* P4 seems to return a 0 LSB, ignore it */
15612 +#ifdef CONFIG_MPENTIUM4
15613 + time &= 0x3EUL;
15614 + time <<= 2;
15615 +#elif defined(CONFIG_X86_64)
15616 + time &= 0xFUL;
15617 + time <<= 4;
15618 +#else
15619 + time &= 0x1FUL;
15620 + time <<= 3;
15621 +#endif
15622 +
15623 + thread->sp0 ^= time;
15624 + load_sp0(init_tss + smp_processor_id(), thread);
15625 +
15626 +#ifdef CONFIG_X86_64
15627 + percpu_write(kernel_stack, thread->sp0);
15628 +#endif
15629 }
15630 +#endif
15631
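/*
 * Editor's sketch, not part of the patch: what the PAX_RANDKSTACK mask/shift
 * pairs above actually yield.  Each variant keeps a few low TSC bits and
 * shifts them up so the low alignment bits of sp0 are never disturbed by the
 * XOR; the Pentium 4 case masks out bit 0 because, as the comment above
 * notes, that CPU tends to return it as 0.
 */
#include <stdio.h>

static void describe(const char *name, unsigned long mask, int shift)
{
	int bits = __builtin_popcountl(mask);		/* entropy bits kept  */
	unsigned long max  = mask << shift;		/* largest XOR offset */
	unsigned long step = (mask & -mask) << shift;	/* offset granularity */

	printf("%-10s %d bits of entropy, offsets 0..%#lx in steps of %lu\n",
	       name, bits, max, step);
}

int main(void)
{
	describe("MPENTIUM4", 0x3EUL, 2);	/* time &= 0x3EUL; time <<= 2 */
	describe("X86_64",    0x0FUL, 4);	/* time &= 0xFUL;  time <<= 4 */
	describe("i386",      0x1FUL, 3);	/* time &= 0x1FUL; time <<= 3 */
	return 0;
}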
15632 diff -urNp linux-2.6.32.41/arch/x86/kernel/ptrace.c linux-2.6.32.41/arch/x86/kernel/ptrace.c
15633 --- linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15634 +++ linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15635 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15636 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15637 {
15638 int ret;
15639 - unsigned long __user *datap = (unsigned long __user *)data;
15640 + unsigned long __user *datap = (__force unsigned long __user *)data;
15641
15642 switch (request) {
15643 /* read the word at location addr in the USER area. */
15644 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15645 if (addr < 0)
15646 return -EIO;
15647 ret = do_get_thread_area(child, addr,
15648 - (struct user_desc __user *) data);
15649 + (__force struct user_desc __user *) data);
15650 break;
15651
15652 case PTRACE_SET_THREAD_AREA:
15653 if (addr < 0)
15654 return -EIO;
15655 ret = do_set_thread_area(child, addr,
15656 - (struct user_desc __user *) data, 0);
15657 + (__force struct user_desc __user *) data, 0);
15658 break;
15659 #endif
15660
15661 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15662 #ifdef CONFIG_X86_PTRACE_BTS
15663 case PTRACE_BTS_CONFIG:
15664 ret = ptrace_bts_config
15665 - (child, data, (struct ptrace_bts_config __user *)addr);
15666 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15667 break;
15668
15669 case PTRACE_BTS_STATUS:
15670 ret = ptrace_bts_status
15671 - (child, data, (struct ptrace_bts_config __user *)addr);
15672 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15673 break;
15674
15675 case PTRACE_BTS_SIZE:
15676 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15677
15678 case PTRACE_BTS_GET:
15679 ret = ptrace_bts_read_record
15680 - (child, data, (struct bts_struct __user *) addr);
15681 + (child, data, (__force struct bts_struct __user *) addr);
15682 break;
15683
15684 case PTRACE_BTS_CLEAR:
15685 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15686
15687 case PTRACE_BTS_DRAIN:
15688 ret = ptrace_bts_drain
15689 - (child, data, (struct bts_struct __user *) addr);
15690 + (child, data, (__force struct bts_struct __user *) addr);
15691 break;
15692 #endif /* CONFIG_X86_PTRACE_BTS */
15693
15694 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15695 info.si_code = si_code;
15696
15697 /* User-mode ip? */
15698 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15699 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15700
15701 /* Send us the fake SIGTRAP */
15702 force_sig_info(SIGTRAP, &info, tsk);
15703 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15704 * We must return the syscall number to actually look up in the table.
15705 * This can be -1L to skip running any syscall at all.
15706 */
15707 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15708 +long syscall_trace_enter(struct pt_regs *regs)
15709 {
15710 long ret = 0;
15711
15712 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15713 return ret ?: regs->orig_ax;
15714 }
15715
15716 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
15717 +void syscall_trace_leave(struct pt_regs *regs)
15718 {
15719 if (unlikely(current->audit_context))
15720 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15721 diff -urNp linux-2.6.32.41/arch/x86/kernel/reboot.c linux-2.6.32.41/arch/x86/kernel/reboot.c
15722 --- linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15723 +++ linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15724 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15725 EXPORT_SYMBOL(pm_power_off);
15726
15727 static const struct desc_ptr no_idt = {};
15728 -static int reboot_mode;
15729 +static unsigned short reboot_mode;
15730 enum reboot_type reboot_type = BOOT_KBD;
15731 int reboot_force;
15732
15733 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
15734 controller to pulse the CPU reset line, which is more thorough, but
15735 doesn't work with at least one type of 486 motherboard. It is easy
15736 to stop this code working; hence the copious comments. */
15737 -static const unsigned long long
15738 -real_mode_gdt_entries [3] =
15739 +static struct desc_struct
15740 +real_mode_gdt_entries [3] __read_only =
15741 {
15742 - 0x0000000000000000ULL, /* Null descriptor */
15743 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15744 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15745 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15746 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15747 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15748 };
15749
15750 static const struct desc_ptr
15751 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15752 * specified by the code and length parameters.
15753 * We assume that length will aways be less that 100!
15754 */
15755 -void machine_real_restart(const unsigned char *code, int length)
15756 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15757 {
15758 local_irq_disable();
15759
15760 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15761 /* Remap the kernel at virtual address zero, as well as offset zero
15762 from the kernel segment. This assumes the kernel segment starts at
15763 virtual address PAGE_OFFSET. */
15764 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15765 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15766 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15767 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15768
15769 /*
15770 * Use `swapper_pg_dir' as our page directory.
15771 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15772 boot)". This seems like a fairly standard thing that gets set by
15773 REBOOT.COM programs, and the previous reset routine did this
15774 too. */
15775 - *((unsigned short *)0x472) = reboot_mode;
15776 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15777
15778 /* For the switch to real mode, copy some code to low memory. It has
15779 to be in the first 64k because it is running in 16-bit mode, and it
15780 has to have the same physical and virtual address, because it turns
15781 off paging. Copy it near the end of the first page, out of the way
15782 of BIOS variables. */
15783 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15784 - real_mode_switch, sizeof (real_mode_switch));
15785 - memcpy((void *)(0x1000 - 100), code, length);
15786 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15787 + memcpy(__va(0x1000 - 100), code, length);
15788
15789 /* Set up the IDT for real mode. */
15790 load_idt(&real_mode_idt);
15791 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15792 __asm__ __volatile__ ("ljmp $0x0008,%0"
15793 :
15794 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15795 + do { } while (1);
15796 }
15797 #ifdef CONFIG_APM_MODULE
15798 EXPORT_SYMBOL(machine_real_restart);
15799 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15800 {
15801 }
15802
15803 -static void native_machine_emergency_restart(void)
15804 +__noreturn static void native_machine_emergency_restart(void)
15805 {
15806 int i;
15807
15808 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15809 #endif
15810 }
15811
15812 -static void __machine_emergency_restart(int emergency)
15813 +static __noreturn void __machine_emergency_restart(int emergency)
15814 {
15815 reboot_emergency = emergency;
15816 machine_ops.emergency_restart();
15817 }
15818
15819 -static void native_machine_restart(char *__unused)
15820 +static __noreturn void native_machine_restart(char *__unused)
15821 {
15822 printk("machine restart\n");
15823
15824 @@ -666,7 +666,7 @@ static void native_machine_restart(char
15825 __machine_emergency_restart(0);
15826 }
15827
15828 -static void native_machine_halt(void)
15829 +static __noreturn void native_machine_halt(void)
15830 {
15831 /* stop other cpus and apics */
15832 machine_shutdown();
15833 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
15834 stop_this_cpu(NULL);
15835 }
15836
15837 -static void native_machine_power_off(void)
15838 +__noreturn static void native_machine_power_off(void)
15839 {
15840 if (pm_power_off) {
15841 if (!reboot_force)
15842 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15843 }
15844 /* a fallback in case there is no PM info available */
15845 tboot_shutdown(TB_SHUTDOWN_HALT);
15846 + do { } while (1);
15847 }
15848
15849 struct machine_ops machine_ops = {
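The real-mode GDT rewrite above swaps the raw 64-bit constants for GDT_ENTRY_INIT() so the table becomes struct desc_struct entries that can sit in __read_only memory. The two encodings are equivalent; the quick user-space check below (a sketch that re-implements the descriptor layout by hand, not kernel code) reproduces the original constants:

/* Sketch: pack an x86 segment descriptor (access byte, base, limit) and
 * confirm it matches the raw quadwords the hunk above replaces. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_desc(uint8_t access, uint32_t base, uint32_t limit)
{
	uint32_t lo = (limit & 0xffff) | ((base & 0xffff) << 16);
	uint32_t hi = ((base >> 16) & 0xff) | ((uint32_t)access << 8) |
		      (limit & 0xf0000) | (base & 0xff000000);
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* 16-bit real-mode 64k code at 0x00000000 */
	assert(pack_desc(0x9b, 0x00000, 0xffff) == 0x00009b000000ffffULL);
	/* 16-bit real-mode 64k data at 0x00000100 */
	assert(pack_desc(0x93, 0x00100, 0xffff) == 0x000093000100ffffULL);
	puts("descriptor encodings match");
	return 0;
}
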
15850 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup.c linux-2.6.32.41/arch/x86/kernel/setup.c
15851 --- linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15852 +++ linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15853 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15854
15855 if (!boot_params.hdr.root_flags)
15856 root_mountflags &= ~MS_RDONLY;
15857 - init_mm.start_code = (unsigned long) _text;
15858 - init_mm.end_code = (unsigned long) _etext;
15859 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15860 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15861 init_mm.end_data = (unsigned long) _edata;
15862 init_mm.brk = _brk_end;
15863
15864 - code_resource.start = virt_to_phys(_text);
15865 - code_resource.end = virt_to_phys(_etext)-1;
15866 - data_resource.start = virt_to_phys(_etext);
15867 + code_resource.start = virt_to_phys(ktla_ktva(_text));
15868 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15869 + data_resource.start = virt_to_phys(_sdata);
15870 data_resource.end = virt_to_phys(_edata)-1;
15871 bss_resource.start = virt_to_phys(&__bss_start);
15872 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15873 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup_percpu.c linux-2.6.32.41/arch/x86/kernel/setup_percpu.c
15874 --- linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
15875 +++ linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
15876 @@ -25,19 +25,17 @@
15877 # define DBG(x...)
15878 #endif
15879
15880 -DEFINE_PER_CPU(int, cpu_number);
15881 +#ifdef CONFIG_SMP
15882 +DEFINE_PER_CPU(unsigned int, cpu_number);
15883 EXPORT_PER_CPU_SYMBOL(cpu_number);
15884 +#endif
15885
15886 -#ifdef CONFIG_X86_64
15887 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15888 -#else
15889 -#define BOOT_PERCPU_OFFSET 0
15890 -#endif
15891
15892 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15893 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15894
15895 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15896 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15897 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15898 };
15899 EXPORT_SYMBOL(__per_cpu_offset);
15900 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
15901 {
15902 #ifdef CONFIG_X86_32
15903 struct desc_struct gdt;
15904 + unsigned long base = per_cpu_offset(cpu);
15905
15906 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15907 - 0x2 | DESCTYPE_S, 0x8);
15908 - gdt.s = 1;
15909 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15910 + 0x83 | DESCTYPE_S, 0xC);
15911 write_gdt_entry(get_cpu_gdt_table(cpu),
15912 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15913 #endif
15914 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
15915 /* alrighty, percpu areas up and running */
15916 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15917 for_each_possible_cpu(cpu) {
15918 +#ifdef CONFIG_CC_STACKPROTECTOR
15919 +#ifdef CONFIG_X86_32
15920 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
15921 +#endif
15922 +#endif
15923 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15924 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15925 per_cpu(cpu_number, cpu) = cpu;
15926 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
15927 early_per_cpu_map(x86_cpu_to_node_map, cpu);
15928 #endif
15929 #endif
15930 +#ifdef CONFIG_CC_STACKPROTECTOR
15931 +#ifdef CONFIG_X86_32
15932 + if (!cpu)
15933 + per_cpu(stack_canary.canary, cpu) = canary;
15934 +#endif
15935 +#endif
15936 /*
15937 * Up to this point, the boot CPU has been using .data.init
15938 * area. Reload any changed state for the boot CPU.
15939 diff -urNp linux-2.6.32.41/arch/x86/kernel/signal.c linux-2.6.32.41/arch/x86/kernel/signal.c
15940 --- linux-2.6.32.41/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
15941 +++ linux-2.6.32.41/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
15942 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
15943 * Align the stack pointer according to the i386 ABI,
15944 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15945 */
15946 - sp = ((sp + 4) & -16ul) - 4;
15947 + sp = ((sp - 12) & -16ul) - 4;
15948 #else /* !CONFIG_X86_32 */
15949 sp = round_down(sp, 16) - 8;
15950 #endif
15951 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
15952 * Return an always-bogus address instead so we will die with SIGSEGV.
15953 */
15954 if (onsigstack && !likely(on_sig_stack(sp)))
15955 - return (void __user *)-1L;
15956 + return (__force void __user *)-1L;
15957
15958 /* save i387 state */
15959 if (used_math() && save_i387_xstate(*fpstate) < 0)
15960 - return (void __user *)-1L;
15961 + return (__force void __user *)-1L;
15962
15963 return (void __user *)sp;
15964 }
15965 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
15966 }
15967
15968 if (current->mm->context.vdso)
15969 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15970 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15971 else
15972 - restorer = &frame->retcode;
15973 + restorer = (void __user *)&frame->retcode;
15974 if (ka->sa.sa_flags & SA_RESTORER)
15975 restorer = ka->sa.sa_restorer;
15976
15977 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
15978 * reasons and because gdb uses it as a signature to notice
15979 * signal handler stack frames.
15980 */
15981 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15982 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15983
15984 if (err)
15985 return -EFAULT;
15986 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
15987 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15988
15989 /* Set up to return from userspace. */
15990 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15991 + if (current->mm->context.vdso)
15992 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15993 + else
15994 + restorer = (void __user *)&frame->retcode;
15995 if (ka->sa.sa_flags & SA_RESTORER)
15996 restorer = ka->sa.sa_restorer;
15997 put_user_ex(restorer, &frame->pretcode);
15998 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
15999 * reasons and because gdb uses it as a signature to notice
16000 * signal handler stack frames.
16001 */
16002 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16003 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16004 } put_user_catch(err);
16005
16006 if (err)
16007 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16008 int signr;
16009 sigset_t *oldset;
16010
16011 + pax_track_stack();
16012 +
16013 /*
16014 * We want the common case to go fast, which is why we may in certain
16015 * cases get here from kernel mode. Just return without doing anything
16016 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16017 * X86_32: vm86 regs switched out by assembly code before reaching
16018 * here, so testing against kernel CS suffices.
16019 */
16020 - if (!user_mode(regs))
16021 + if (!user_mode_novm(regs))
16022 return;
16023
16024 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
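The align_sigframe() change above moves the 32-bit signal frame from ((sp + 4) & -16) - 4 to ((sp - 12) & -16) - 4. Both forms satisfy the i386 ABI rule that ((sp + 4) & 15) == 0 at handler entry; the patched form additionally guarantees the result lies at least 16 bytes below the incoming sp, whereas the old form could return sp itself (the rationale is presumably to always keep the frame strictly below the interrupted stack pointer). A quick arithmetic check in plain user-space C:

/* Sketch: verify both alignment formulas against the i386 ABI rule and
 * show that the patched one always drops below the incoming sp. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long sp;

	for (sp = 0x100; sp < 0x140; sp++) {
		unsigned long old_sp = ((sp + 4) & -16UL) - 4;
		unsigned long new_sp = ((sp - 12) & -16UL) - 4;

		assert(((old_sp + 4) & 15) == 0);	/* ABI: (sp + 4) % 16 == 0 */
		assert(((new_sp + 4) & 15) == 0);
		assert(old_sp <= sp);			/* old form may return sp itself   */
		assert(new_sp <= sp - 16);		/* patched form reserves >= 16 bytes */
	}
	puts("alignment invariants hold");
	return 0;
}
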
16025 diff -urNp linux-2.6.32.41/arch/x86/kernel/smpboot.c linux-2.6.32.41/arch/x86/kernel/smpboot.c
16026 --- linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16027 +++ linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16028 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16029 */
16030 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16031
16032 -void cpu_hotplug_driver_lock()
16033 +void cpu_hotplug_driver_lock(void)
16034 {
16035 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16036 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16037 }
16038
16039 -void cpu_hotplug_driver_unlock()
16040 +void cpu_hotplug_driver_unlock(void)
16041 {
16042 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16043 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16044 }
16045
16046 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16047 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16048 set_idle_for_cpu(cpu, c_idle.idle);
16049 do_rest:
16050 per_cpu(current_task, cpu) = c_idle.idle;
16051 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16052 #ifdef CONFIG_X86_32
16053 /* Stack for startup_32 can be just as for start_secondary onwards */
16054 irq_ctx_init(cpu);
16055 @@ -750,11 +751,13 @@ do_rest:
16056 #else
16057 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16058 initial_gs = per_cpu_offset(cpu);
16059 - per_cpu(kernel_stack, cpu) =
16060 - (unsigned long)task_stack_page(c_idle.idle) -
16061 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16062 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16063 #endif
16064 +
16065 + pax_open_kernel();
16066 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16067 + pax_close_kernel();
16068 +
16069 initial_code = (unsigned long)start_secondary;
16070 stack_start.sp = (void *) c_idle.idle->thread.sp;
16071
16072 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16073
16074 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16075
16076 +#ifdef CONFIG_PAX_PER_CPU_PGD
16077 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16078 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16079 + KERNEL_PGD_PTRS);
16080 +#endif
16081 +
16082 err = do_boot_cpu(apicid, cpu);
16083
16084 if (err) {
16085 diff -urNp linux-2.6.32.41/arch/x86/kernel/step.c linux-2.6.32.41/arch/x86/kernel/step.c
16086 --- linux-2.6.32.41/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16087 +++ linux-2.6.32.41/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16088 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16089 struct desc_struct *desc;
16090 unsigned long base;
16091
16092 - seg &= ~7UL;
16093 + seg >>= 3;
16094
16095 mutex_lock(&child->mm->context.lock);
16096 - if (unlikely((seg >> 3) >= child->mm->context.size))
16097 + if (unlikely(seg >= child->mm->context.size))
16098 addr = -1L; /* bogus selector, access would fault */
16099 else {
16100 desc = child->mm->context.ldt + seg;
16101 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16102 addr += base;
16103 }
16104 mutex_unlock(&child->mm->context.lock);
16105 - }
16106 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16107 + addr = ktla_ktva(addr);
16108
16109 return addr;
16110 }
16111 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16112 unsigned char opcode[15];
16113 unsigned long addr = convert_ip_to_linear(child, regs);
16114
16115 + if (addr == -EINVAL)
16116 + return 0;
16117 +
16118 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16119 for (i = 0; i < copied; i++) {
16120 switch (opcode[i]) {
16121 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16122
16123 #ifdef CONFIG_X86_64
16124 case 0x40 ... 0x4f:
16125 - if (regs->cs != __USER_CS)
16126 + if ((regs->cs & 0xffff) != __USER_CS)
16127 /* 32-bit mode: register increment */
16128 return 0;
16129 /* 64-bit mode: REX prefix */
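The convert_ip_to_linear() change above stops masking the selector with ~7 and instead shifts it right by 3, working with the descriptor index directly: an x86 selector is index << 3 | TI << 2 | RPL, so seg >> 3 is the LDT/GDT slot and the old (seg >> 3) >= size bound check becomes seg >= size. A small decomposition sketch (user-space, illustrative names only):

/* Sketch: decompose an x86 segment selector into index, table indicator
 * and requested privilege level, as assumed by the hunk above. */
#include <stdio.h>
#include <stdint.h>

struct selector {
	uint16_t index;	/* descriptor slot in the GDT or LDT */
	uint8_t  ti;	/* 0 = GDT, 1 = LDT                  */
	uint8_t  rpl;	/* requested privilege level, 0..3   */
};

static struct selector decode(uint16_t sel)
{
	struct selector s = {
		.index = sel >> 3,
		.ti    = (sel >> 2) & 1,
		.rpl   = sel & 3,
	};
	return s;
}

int main(void)
{
	struct selector s = decode(0x7B);	/* 32-bit __USER_DS: index 15, GDT, RPL 3 */

	printf("index=%u ti=%u rpl=%u\n",
	       (unsigned)s.index, (unsigned)s.ti, (unsigned)s.rpl);
	return 0;
}
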
16130 diff -urNp linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S
16131 --- linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16132 +++ linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16133 @@ -1,3 +1,4 @@
16134 +.section .rodata,"a",@progbits
16135 ENTRY(sys_call_table)
16136 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16137 .long sys_exit
16138 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c
16139 --- linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16140 +++ linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16141 @@ -24,6 +24,21 @@
16142
16143 #include <asm/syscalls.h>
16144
16145 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16146 +{
16147 + unsigned long pax_task_size = TASK_SIZE;
16148 +
16149 +#ifdef CONFIG_PAX_SEGMEXEC
16150 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16151 + pax_task_size = SEGMEXEC_TASK_SIZE;
16152 +#endif
16153 +
16154 + if (len > pax_task_size || addr > pax_task_size - len)
16155 + return -EINVAL;
16156 +
16157 + return 0;
16158 +}
16159 +
16160 /*
16161 * Perform the select(nd, in, out, ex, tv) and mmap() system
16162 * calls. Linux/i386 didn't use to be able to handle more than
16163 @@ -58,6 +73,212 @@ out:
16164 return err;
16165 }
16166
16167 +unsigned long
16168 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16169 + unsigned long len, unsigned long pgoff, unsigned long flags)
16170 +{
16171 + struct mm_struct *mm = current->mm;
16172 + struct vm_area_struct *vma;
16173 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16174 +
16175 +#ifdef CONFIG_PAX_SEGMEXEC
16176 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16177 + pax_task_size = SEGMEXEC_TASK_SIZE;
16178 +#endif
16179 +
16180 + pax_task_size -= PAGE_SIZE;
16181 +
16182 + if (len > pax_task_size)
16183 + return -ENOMEM;
16184 +
16185 + if (flags & MAP_FIXED)
16186 + return addr;
16187 +
16188 +#ifdef CONFIG_PAX_RANDMMAP
16189 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16190 +#endif
16191 +
16192 + if (addr) {
16193 + addr = PAGE_ALIGN(addr);
16194 + if (pax_task_size - len >= addr) {
16195 + vma = find_vma(mm, addr);
16196 + if (check_heap_stack_gap(vma, addr, len))
16197 + return addr;
16198 + }
16199 + }
16200 + if (len > mm->cached_hole_size) {
16201 + start_addr = addr = mm->free_area_cache;
16202 + } else {
16203 + start_addr = addr = mm->mmap_base;
16204 + mm->cached_hole_size = 0;
16205 + }
16206 +
16207 +#ifdef CONFIG_PAX_PAGEEXEC
16208 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16209 + start_addr = 0x00110000UL;
16210 +
16211 +#ifdef CONFIG_PAX_RANDMMAP
16212 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16213 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16214 +#endif
16215 +
16216 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16217 + start_addr = addr = mm->mmap_base;
16218 + else
16219 + addr = start_addr;
16220 + }
16221 +#endif
16222 +
16223 +full_search:
16224 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16225 + /* At this point: (!vma || addr < vma->vm_end). */
16226 + if (pax_task_size - len < addr) {
16227 + /*
16228 + * Start a new search - just in case we missed
16229 + * some holes.
16230 + */
16231 + if (start_addr != mm->mmap_base) {
16232 + start_addr = addr = mm->mmap_base;
16233 + mm->cached_hole_size = 0;
16234 + goto full_search;
16235 + }
16236 + return -ENOMEM;
16237 + }
16238 + if (check_heap_stack_gap(vma, addr, len))
16239 + break;
16240 + if (addr + mm->cached_hole_size < vma->vm_start)
16241 + mm->cached_hole_size = vma->vm_start - addr;
16242 + addr = vma->vm_end;
16243 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16244 + start_addr = addr = mm->mmap_base;
16245 + mm->cached_hole_size = 0;
16246 + goto full_search;
16247 + }
16248 + }
16249 +
16250 + /*
16251 + * Remember the place where we stopped the search:
16252 + */
16253 + mm->free_area_cache = addr + len;
16254 + return addr;
16255 +}
16256 +
16257 +unsigned long
16258 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16259 + const unsigned long len, const unsigned long pgoff,
16260 + const unsigned long flags)
16261 +{
16262 + struct vm_area_struct *vma;
16263 + struct mm_struct *mm = current->mm;
16264 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16265 +
16266 +#ifdef CONFIG_PAX_SEGMEXEC
16267 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16268 + pax_task_size = SEGMEXEC_TASK_SIZE;
16269 +#endif
16270 +
16271 + pax_task_size -= PAGE_SIZE;
16272 +
16273 + /* requested length too big for entire address space */
16274 + if (len > pax_task_size)
16275 + return -ENOMEM;
16276 +
16277 + if (flags & MAP_FIXED)
16278 + return addr;
16279 +
16280 +#ifdef CONFIG_PAX_PAGEEXEC
16281 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16282 + goto bottomup;
16283 +#endif
16284 +
16285 +#ifdef CONFIG_PAX_RANDMMAP
16286 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16287 +#endif
16288 +
16289 + /* requesting a specific address */
16290 + if (addr) {
16291 + addr = PAGE_ALIGN(addr);
16292 + if (pax_task_size - len >= addr) {
16293 + vma = find_vma(mm, addr);
16294 + if (check_heap_stack_gap(vma, addr, len))
16295 + return addr;
16296 + }
16297 + }
16298 +
16299 + /* check if free_area_cache is useful for us */
16300 + if (len <= mm->cached_hole_size) {
16301 + mm->cached_hole_size = 0;
16302 + mm->free_area_cache = mm->mmap_base;
16303 + }
16304 +
16305 + /* either no address requested or can't fit in requested address hole */
16306 + addr = mm->free_area_cache;
16307 +
16308 + /* make sure it can fit in the remaining address space */
16309 + if (addr > len) {
16310 + vma = find_vma(mm, addr-len);
16311 + if (check_heap_stack_gap(vma, addr - len, len))
16312 + /* remember the address as a hint for next time */
16313 + return (mm->free_area_cache = addr-len);
16314 + }
16315 +
16316 + if (mm->mmap_base < len)
16317 + goto bottomup;
16318 +
16319 + addr = mm->mmap_base-len;
16320 +
16321 + do {
16322 + /*
16323 + * Lookup failure means no vma is above this address,
16324 + * else if new region fits below vma->vm_start,
16325 + * return with success:
16326 + */
16327 + vma = find_vma(mm, addr);
16328 + if (check_heap_stack_gap(vma, addr, len))
16329 + /* remember the address as a hint for next time */
16330 + return (mm->free_area_cache = addr);
16331 +
16332 + /* remember the largest hole we saw so far */
16333 + if (addr + mm->cached_hole_size < vma->vm_start)
16334 + mm->cached_hole_size = vma->vm_start - addr;
16335 +
16336 + /* try just below the current vma->vm_start */
16337 + addr = skip_heap_stack_gap(vma, len);
16338 + } while (!IS_ERR_VALUE(addr));
16339 +
16340 +bottomup:
16341 + /*
16342 + * A failed mmap() very likely causes application failure,
16343 + * so fall back to the bottom-up function here. This scenario
16344 + * can happen with large stack limits and large mmap()
16345 + * allocations.
16346 + */
16347 +
16348 +#ifdef CONFIG_PAX_SEGMEXEC
16349 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16350 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16351 + else
16352 +#endif
16353 +
16354 + mm->mmap_base = TASK_UNMAPPED_BASE;
16355 +
16356 +#ifdef CONFIG_PAX_RANDMMAP
16357 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16358 + mm->mmap_base += mm->delta_mmap;
16359 +#endif
16360 +
16361 + mm->free_area_cache = mm->mmap_base;
16362 + mm->cached_hole_size = ~0UL;
16363 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16364 + /*
16365 + * Restore the topdown base:
16366 + */
16367 + mm->mmap_base = base;
16368 + mm->free_area_cache = base;
16369 + mm->cached_hole_size = ~0UL;
16370 +
16371 + return addr;
16372 +}
16373
16374 struct sel_arg_struct {
16375 unsigned long n;
16376 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16377 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16378 case SEMTIMEDOP:
16379 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16380 - (const struct timespec __user *)fifth);
16381 + (__force const struct timespec __user *)fifth);
16382
16383 case SEMGET:
16384 return sys_semget(first, second, third);
16385 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16386 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16387 if (ret)
16388 return ret;
16389 - return put_user(raddr, (ulong __user *) third);
16390 + return put_user(raddr, (__force ulong __user *) third);
16391 }
16392 case 1: /* iBCS2 emulator entry point */
16393 if (!segment_eq(get_fs(), get_ds()))
16394 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16395
16396 return error;
16397 }
16398 -
16399 -
16400 -/*
16401 - * Do a system call from kernel instead of calling sys_execve so we
16402 - * end up with proper pt_regs.
16403 - */
16404 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16405 -{
16406 - long __res;
16407 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16408 - : "=a" (__res)
16409 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16410 - return __res;
16411 -}
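The new i386_mmap_check() above rejects mappings with "len > pax_task_size || addr > pax_task_size - len" rather than the naive "addr + len > pax_task_size": because len has already been checked against the limit, the subtraction cannot underflow, while addr + len can wrap around an unsigned long and slip under the limit. A minimal demonstration (user-space sketch, 32-bit arithmetic forced with uint32_t):

/* Sketch: why the bound is written as addr > limit - len rather than
 * addr + len > limit; the latter can wrap on 32-bit. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t limit = 0xC0000000u;	/* a TASK_SIZE-like limit */
	uint32_t addr  = 0xFFFFF000u;
	uint32_t len   = 0x00002000u;

	/* naive check: 0xFFFFF000 + 0x2000 wraps to 0x1000 and "passes" */
	printf("addr + len > limit : %d\n", addr + len > limit);
	/* overflow-safe check used by the hunk above: correctly rejects */
	printf("addr > limit - len : %d\n", addr > limit - len);
	return 0;
}
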
16412 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c
16413 --- linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16414 +++ linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16415 @@ -32,8 +32,8 @@ out:
16416 return error;
16417 }
16418
16419 -static void find_start_end(unsigned long flags, unsigned long *begin,
16420 - unsigned long *end)
16421 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16422 + unsigned long *begin, unsigned long *end)
16423 {
16424 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16425 unsigned long new_begin;
16426 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16427 *begin = new_begin;
16428 }
16429 } else {
16430 - *begin = TASK_UNMAPPED_BASE;
16431 + *begin = mm->mmap_base;
16432 *end = TASK_SIZE;
16433 }
16434 }
16435 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16436 if (flags & MAP_FIXED)
16437 return addr;
16438
16439 - find_start_end(flags, &begin, &end);
16440 + find_start_end(mm, flags, &begin, &end);
16441
16442 if (len > end)
16443 return -ENOMEM;
16444
16445 +#ifdef CONFIG_PAX_RANDMMAP
16446 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16447 +#endif
16448 +
16449 if (addr) {
16450 addr = PAGE_ALIGN(addr);
16451 vma = find_vma(mm, addr);
16452 - if (end - len >= addr &&
16453 - (!vma || addr + len <= vma->vm_start))
16454 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16455 return addr;
16456 }
16457 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16458 @@ -106,7 +109,7 @@ full_search:
16459 }
16460 return -ENOMEM;
16461 }
16462 - if (!vma || addr + len <= vma->vm_start) {
16463 + if (check_heap_stack_gap(vma, addr, len)) {
16464 /*
16465 * Remember the place where we stopped the search:
16466 */
16467 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16468 {
16469 struct vm_area_struct *vma;
16470 struct mm_struct *mm = current->mm;
16471 - unsigned long addr = addr0;
16472 + unsigned long base = mm->mmap_base, addr = addr0;
16473
16474 /* requested length too big for entire address space */
16475 if (len > TASK_SIZE)
16476 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16477 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16478 goto bottomup;
16479
16480 +#ifdef CONFIG_PAX_RANDMMAP
16481 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16482 +#endif
16483 +
16484 /* requesting a specific address */
16485 if (addr) {
16486 addr = PAGE_ALIGN(addr);
16487 - vma = find_vma(mm, addr);
16488 - if (TASK_SIZE - len >= addr &&
16489 - (!vma || addr + len <= vma->vm_start))
16490 - return addr;
16491 + if (TASK_SIZE - len >= addr) {
16492 + vma = find_vma(mm, addr);
16493 + if (check_heap_stack_gap(vma, addr, len))
16494 + return addr;
16495 + }
16496 }
16497
16498 /* check if free_area_cache is useful for us */
16499 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16500 /* make sure it can fit in the remaining address space */
16501 if (addr > len) {
16502 vma = find_vma(mm, addr-len);
16503 - if (!vma || addr <= vma->vm_start)
16504 + if (check_heap_stack_gap(vma, addr - len, len))
16505 /* remember the address as a hint for next time */
16506 return mm->free_area_cache = addr-len;
16507 }
16508 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16509 * return with success:
16510 */
16511 vma = find_vma(mm, addr);
16512 - if (!vma || addr+len <= vma->vm_start)
16513 + if (check_heap_stack_gap(vma, addr, len))
16514 /* remember the address as a hint for next time */
16515 return mm->free_area_cache = addr;
16516
16517 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16518 mm->cached_hole_size = vma->vm_start - addr;
16519
16520 /* try just below the current vma->vm_start */
16521 - addr = vma->vm_start-len;
16522 - } while (len < vma->vm_start);
16523 + addr = skip_heap_stack_gap(vma, len);
16524 + } while (!IS_ERR_VALUE(addr));
16525
16526 bottomup:
16527 /*
16528 @@ -198,13 +206,21 @@ bottomup:
16529 * can happen with large stack limits and large mmap()
16530 * allocations.
16531 */
16532 + mm->mmap_base = TASK_UNMAPPED_BASE;
16533 +
16534 +#ifdef CONFIG_PAX_RANDMMAP
16535 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16536 + mm->mmap_base += mm->delta_mmap;
16537 +#endif
16538 +
16539 + mm->free_area_cache = mm->mmap_base;
16540 mm->cached_hole_size = ~0UL;
16541 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16542 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16543 /*
16544 * Restore the topdown base:
16545 */
16546 - mm->free_area_cache = mm->mmap_base;
16547 + mm->mmap_base = base;
16548 + mm->free_area_cache = base;
16549 mm->cached_hole_size = ~0UL;
16550
16551 return addr;
16552 diff -urNp linux-2.6.32.41/arch/x86/kernel/tboot.c linux-2.6.32.41/arch/x86/kernel/tboot.c
16553 --- linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16554 +++ linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16555 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16556
16557 void tboot_shutdown(u32 shutdown_type)
16558 {
16559 - void (*shutdown)(void);
16560 + void (* __noreturn shutdown)(void);
16561
16562 if (!tboot_enabled())
16563 return;
16564 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16565
16566 switch_to_tboot_pt();
16567
16568 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16569 + shutdown = (void *)tboot->shutdown_entry;
16570 shutdown();
16571
16572 /* should not reach here */
16573 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16574 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16575 }
16576
16577 -static atomic_t ap_wfs_count;
16578 +static atomic_unchecked_t ap_wfs_count;
16579
16580 static int tboot_wait_for_aps(int num_aps)
16581 {
16582 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16583 {
16584 switch (action) {
16585 case CPU_DYING:
16586 - atomic_inc(&ap_wfs_count);
16587 + atomic_inc_unchecked(&ap_wfs_count);
16588 if (num_online_cpus() == 1)
16589 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16590 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16591 return NOTIFY_BAD;
16592 break;
16593 }
16594 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16595
16596 tboot_create_trampoline();
16597
16598 - atomic_set(&ap_wfs_count, 0);
16599 + atomic_set_unchecked(&ap_wfs_count, 0);
16600 register_hotcpu_notifier(&tboot_cpu_notifier);
16601 return 0;
16602 }
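The atomic_unchecked_t conversion above is the usual PAX_REFCOUNT pattern: with that option enabled, plain atomic_t increments detect signed overflow and report it (the do_trap() change in the traps.c hunk further down routes trap 4, #OF, to pax_report_refcount_overflow()), and counters that are not reference counts, like this AP wait counter, are switched to the *_unchecked variants to opt out. The snippet below only sketches the idea in user space; the real PaX implementation is arch-specific inline asm, not this code:

/* Sketch of the PAX_REFCOUNT idea: a checked counter refuses to wrap. */
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *counter)
{
	if (*counter == INT_MAX)	/* would overflow: refuse and report */
		return -1;
	(*counter)++;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;

	if (checked_inc(&refs))
		puts("refcount overflow detected, increment refused");
	return 0;
}
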
16603 diff -urNp linux-2.6.32.41/arch/x86/kernel/time.c linux-2.6.32.41/arch/x86/kernel/time.c
16604 --- linux-2.6.32.41/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16605 +++ linux-2.6.32.41/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16606 @@ -26,17 +26,13 @@
16607 int timer_ack;
16608 #endif
16609
16610 -#ifdef CONFIG_X86_64
16611 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16612 -#endif
16613 -
16614 unsigned long profile_pc(struct pt_regs *regs)
16615 {
16616 unsigned long pc = instruction_pointer(regs);
16617
16618 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16619 + if (!user_mode(regs) && in_lock_functions(pc)) {
16620 #ifdef CONFIG_FRAME_POINTER
16621 - return *(unsigned long *)(regs->bp + sizeof(long));
16622 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16623 #else
16624 unsigned long *sp =
16625 (unsigned long *)kernel_stack_pointer(regs);
16626 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16627 * or above a saved flags. Eflags has bits 22-31 zero,
16628 * kernel addresses don't.
16629 */
16630 +
16631 +#ifdef CONFIG_PAX_KERNEXEC
16632 + return ktla_ktva(sp[0]);
16633 +#else
16634 if (sp[0] >> 22)
16635 return sp[0];
16636 if (sp[1] >> 22)
16637 return sp[1];
16638 #endif
16639 +
16640 +#endif
16641 }
16642 return pc;
16643 }
16644 diff -urNp linux-2.6.32.41/arch/x86/kernel/tls.c linux-2.6.32.41/arch/x86/kernel/tls.c
16645 --- linux-2.6.32.41/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16646 +++ linux-2.6.32.41/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16647 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16648 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16649 return -EINVAL;
16650
16651 +#ifdef CONFIG_PAX_SEGMEXEC
16652 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16653 + return -EINVAL;
16654 +#endif
16655 +
16656 set_tls_desc(p, idx, &info, 1);
16657
16658 return 0;
16659 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_32.S linux-2.6.32.41/arch/x86/kernel/trampoline_32.S
16660 --- linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16661 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16662 @@ -32,6 +32,12 @@
16663 #include <asm/segment.h>
16664 #include <asm/page_types.h>
16665
16666 +#ifdef CONFIG_PAX_KERNEXEC
16667 +#define ta(X) (X)
16668 +#else
16669 +#define ta(X) ((X) - __PAGE_OFFSET)
16670 +#endif
16671 +
16672 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16673 __CPUINITRODATA
16674 .code16
16675 @@ -60,7 +66,7 @@ r_base = .
16676 inc %ax # protected mode (PE) bit
16677 lmsw %ax # into protected mode
16678 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16679 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16680 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16681
16682 # These need to be in the same 64K segment as the above;
16683 # hence we don't use the boot_gdt_descr defined in head.S
16684 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_64.S linux-2.6.32.41/arch/x86/kernel/trampoline_64.S
16685 --- linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16686 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16687 @@ -91,7 +91,7 @@ startup_32:
16688 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16689 movl %eax, %ds
16690
16691 - movl $X86_CR4_PAE, %eax
16692 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16693 movl %eax, %cr4 # Enable PAE mode
16694
16695 # Setup trampoline 4 level pagetables
16696 @@ -138,7 +138,7 @@ tidt:
16697 # so the kernel can live anywhere
16698 .balign 4
16699 tgdt:
16700 - .short tgdt_end - tgdt # gdt limit
16701 + .short tgdt_end - tgdt - 1 # gdt limit
16702 .long tgdt - r_base
16703 .short 0
16704 .quad 0x00cf9b000000ffff # __KERNEL32_CS
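The tgdt fix above changes the limit to "tgdt_end - tgdt - 1": the limit field loaded by LGDT is the offset of the last valid byte of the table, i.e. its size minus one, the same convention the other GDT/IDT pseudo-descriptors follow. A tiny illustration with a hypothetical four-descriptor table:

/* Sketch: the GDTR/LGDT limit is "size - 1", not "size". */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t gdt[4] = { 0 };		/* a hypothetical 4-slot table   */
	uint16_t limit = sizeof(gdt) - 1;	/* offset of the last valid byte */

	assert(limit == 31);			/* 4 * 8 - 1 */
	return 0;
}
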
16705 diff -urNp linux-2.6.32.41/arch/x86/kernel/traps.c linux-2.6.32.41/arch/x86/kernel/traps.c
16706 --- linux-2.6.32.41/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16707 +++ linux-2.6.32.41/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16708 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16709
16710 /* Do we ignore FPU interrupts ? */
16711 char ignore_fpu_irq;
16712 -
16713 -/*
16714 - * The IDT has to be page-aligned to simplify the Pentium
16715 - * F0 0F bug workaround.
16716 - */
16717 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16718 #endif
16719
16720 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16721 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16722 static inline void
16723 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16724 {
16725 - if (!user_mode_vm(regs))
16726 + if (!user_mode(regs))
16727 die(str, regs, err);
16728 }
16729 #endif
16730
16731 static void __kprobes
16732 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16733 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16734 long error_code, siginfo_t *info)
16735 {
16736 struct task_struct *tsk = current;
16737
16738 #ifdef CONFIG_X86_32
16739 - if (regs->flags & X86_VM_MASK) {
16740 + if (v8086_mode(regs)) {
16741 /*
16742 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16743 * On nmi (interrupt 2), do_trap should not be called.
16744 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16745 }
16746 #endif
16747
16748 - if (!user_mode(regs))
16749 + if (!user_mode_novm(regs))
16750 goto kernel_trap;
16751
16752 #ifdef CONFIG_X86_32
16753 @@ -158,7 +152,7 @@ trap_signal:
16754 printk_ratelimit()) {
16755 printk(KERN_INFO
16756 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16757 - tsk->comm, tsk->pid, str,
16758 + tsk->comm, task_pid_nr(tsk), str,
16759 regs->ip, regs->sp, error_code);
16760 print_vma_addr(" in ", regs->ip);
16761 printk("\n");
16762 @@ -175,8 +169,20 @@ kernel_trap:
16763 if (!fixup_exception(regs)) {
16764 tsk->thread.error_code = error_code;
16765 tsk->thread.trap_no = trapnr;
16766 +
16767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16768 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16769 + str = "PAX: suspicious stack segment fault";
16770 +#endif
16771 +
16772 die(str, regs, error_code);
16773 }
16774 +
16775 +#ifdef CONFIG_PAX_REFCOUNT
16776 + if (trapnr == 4)
16777 + pax_report_refcount_overflow(regs);
16778 +#endif
16779 +
16780 return;
16781
16782 #ifdef CONFIG_X86_32
16783 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16784 conditional_sti(regs);
16785
16786 #ifdef CONFIG_X86_32
16787 - if (regs->flags & X86_VM_MASK)
16788 + if (v8086_mode(regs))
16789 goto gp_in_vm86;
16790 #endif
16791
16792 tsk = current;
16793 - if (!user_mode(regs))
16794 + if (!user_mode_novm(regs))
16795 goto gp_in_kernel;
16796
16797 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16798 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16799 + struct mm_struct *mm = tsk->mm;
16800 + unsigned long limit;
16801 +
16802 + down_write(&mm->mmap_sem);
16803 + limit = mm->context.user_cs_limit;
16804 + if (limit < TASK_SIZE) {
16805 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16806 + up_write(&mm->mmap_sem);
16807 + return;
16808 + }
16809 + up_write(&mm->mmap_sem);
16810 + }
16811 +#endif
16812 +
16813 tsk->thread.error_code = error_code;
16814 tsk->thread.trap_no = 13;
16815
16816 @@ -305,6 +327,13 @@ gp_in_kernel:
16817 if (notify_die(DIE_GPF, "general protection fault", regs,
16818 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16819 return;
16820 +
16821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16822 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16823 + die("PAX: suspicious general protection fault", regs, error_code);
16824 + else
16825 +#endif
16826 +
16827 die("general protection fault", regs, error_code);
16828 }
16829
16830 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16831 }
16832
16833 #ifdef CONFIG_X86_32
16834 - if (regs->flags & X86_VM_MASK)
16835 + if (v8086_mode(regs))
16836 goto debug_vm86;
16837 #endif
16838
16839 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16840 * kernel space (but re-enable TF when returning to user mode).
16841 */
16842 if (condition & DR_STEP) {
16843 - if (!user_mode(regs))
16844 + if (!user_mode_novm(regs))
16845 goto clear_TF_reenable;
16846 }
16847
16848 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16849 * Handle strange cache flush from user space exception
16850 * in all other cases. This is undocumented behaviour.
16851 */
16852 - if (regs->flags & X86_VM_MASK) {
16853 + if (v8086_mode(regs)) {
16854 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16855 return;
16856 }
16857 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16858 void __math_state_restore(void)
16859 {
16860 struct thread_info *thread = current_thread_info();
16861 - struct task_struct *tsk = thread->task;
16862 + struct task_struct *tsk = current;
16863
16864 /*
16865 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16866 @@ -825,8 +854,7 @@ void __math_state_restore(void)
16867 */
16868 asmlinkage void math_state_restore(void)
16869 {
16870 - struct thread_info *thread = current_thread_info();
16871 - struct task_struct *tsk = thread->task;
16872 + struct task_struct *tsk = current;
16873
16874 if (!tsk_used_math(tsk)) {
16875 local_irq_enable();
16876 diff -urNp linux-2.6.32.41/arch/x86/kernel/vm86_32.c linux-2.6.32.41/arch/x86/kernel/vm86_32.c
16877 --- linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
16878 +++ linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
16879 @@ -41,6 +41,7 @@
16880 #include <linux/ptrace.h>
16881 #include <linux/audit.h>
16882 #include <linux/stddef.h>
16883 +#include <linux/grsecurity.h>
16884
16885 #include <asm/uaccess.h>
16886 #include <asm/io.h>
16887 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16888 do_exit(SIGSEGV);
16889 }
16890
16891 - tss = &per_cpu(init_tss, get_cpu());
16892 + tss = init_tss + get_cpu();
16893 current->thread.sp0 = current->thread.saved_sp0;
16894 current->thread.sysenter_cs = __KERNEL_CS;
16895 load_sp0(tss, &current->thread);
16896 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
16897 struct task_struct *tsk;
16898 int tmp, ret = -EPERM;
16899
16900 +#ifdef CONFIG_GRKERNSEC_VM86
16901 + if (!capable(CAP_SYS_RAWIO)) {
16902 + gr_handle_vm86();
16903 + goto out;
16904 + }
16905 +#endif
16906 +
16907 tsk = current;
16908 if (tsk->thread.saved_sp0)
16909 goto out;
16910 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
16911 int tmp, ret;
16912 struct vm86plus_struct __user *v86;
16913
16914 +#ifdef CONFIG_GRKERNSEC_VM86
16915 + if (!capable(CAP_SYS_RAWIO)) {
16916 + gr_handle_vm86();
16917 + ret = -EPERM;
16918 + goto out;
16919 + }
16920 +#endif
16921 +
16922 tsk = current;
16923 switch (regs->bx) {
16924 case VM86_REQUEST_IRQ:
16925 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16926 tsk->thread.saved_fs = info->regs32->fs;
16927 tsk->thread.saved_gs = get_user_gs(info->regs32);
16928
16929 - tss = &per_cpu(init_tss, get_cpu());
16930 + tss = init_tss + get_cpu();
16931 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16932 if (cpu_has_sep)
16933 tsk->thread.sysenter_cs = 0;
16934 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16935 goto cannot_handle;
16936 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16937 goto cannot_handle;
16938 - intr_ptr = (unsigned long __user *) (i << 2);
16939 + intr_ptr = (__force unsigned long __user *) (i << 2);
16940 if (get_user(segoffs, intr_ptr))
16941 goto cannot_handle;
16942 if ((segoffs >> 16) == BIOSSEG)
16943 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmi_32.c linux-2.6.32.41/arch/x86/kernel/vmi_32.c
16944 --- linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
16945 +++ linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
16946 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
16947 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
16948
16949 #define call_vrom_func(rom,func) \
16950 - (((VROMFUNC *)(rom->func))())
16951 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
16952
16953 #define call_vrom_long_func(rom,func,arg) \
16954 - (((VROMLONGFUNC *)(rom->func)) (arg))
16955 +({\
16956 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
16957 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
16958 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
16959 + __reloc;\
16960 +})
16961
16962 -static struct vrom_header *vmi_rom;
16963 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
16964 static int disable_pge;
16965 static int disable_pse;
16966 static int disable_sep;
16967 @@ -76,10 +81,10 @@ static struct {
16968 void (*set_initial_ap_state)(int, int);
16969 void (*halt)(void);
16970 void (*set_lazy_mode)(int mode);
16971 -} vmi_ops;
16972 +} vmi_ops __read_only;
16973
16974 /* Cached VMI operations */
16975 -struct vmi_timer_ops vmi_timer_ops;
16976 +struct vmi_timer_ops vmi_timer_ops __read_only;
16977
16978 /*
16979 * VMI patching routines.
16980 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
16981 static inline void patch_offset(void *insnbuf,
16982 unsigned long ip, unsigned long dest)
16983 {
16984 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
16985 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
16986 }
16987
16988 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
16989 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
16990 {
16991 u64 reloc;
16992 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
16993 +
16994 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
16995 switch(rel->type) {
16996 case VMI_RELOCATION_CALL_REL:
16997 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
16998
16999 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17000 {
17001 - const pte_t pte = { .pte = 0 };
17002 + const pte_t pte = __pte(0ULL);
17003 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17004 }
17005
17006 static void vmi_pmd_clear(pmd_t *pmd)
17007 {
17008 - const pte_t pte = { .pte = 0 };
17009 + const pte_t pte = __pte(0ULL);
17010 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17011 }
17012 #endif
17013 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17014 ap.ss = __KERNEL_DS;
17015 ap.esp = (unsigned long) start_esp;
17016
17017 - ap.ds = __USER_DS;
17018 - ap.es = __USER_DS;
17019 + ap.ds = __KERNEL_DS;
17020 + ap.es = __KERNEL_DS;
17021 ap.fs = __KERNEL_PERCPU;
17022 - ap.gs = __KERNEL_STACK_CANARY;
17023 + savesegment(gs, ap.gs);
17024
17025 ap.eflags = 0;
17026
17027 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17028 paravirt_leave_lazy_mmu();
17029 }
17030
17031 +#ifdef CONFIG_PAX_KERNEXEC
17032 +static unsigned long vmi_pax_open_kernel(void)
17033 +{
17034 + return 0;
17035 +}
17036 +
17037 +static unsigned long vmi_pax_close_kernel(void)
17038 +{
17039 + return 0;
17040 +}
17041 +#endif
17042 +
17043 static inline int __init check_vmi_rom(struct vrom_header *rom)
17044 {
17045 struct pci_header *pci;
17046 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17047 return 0;
17048 if (rom->vrom_signature != VMI_SIGNATURE)
17049 return 0;
17050 + if (rom->rom_length * 512 > sizeof(*rom)) {
17051 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17052 + return 0;
17053 + }
17054 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17055 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17056 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17057 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17058 struct vrom_header *romstart;
17059 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17060 if (check_vmi_rom(romstart)) {
17061 - vmi_rom = romstart;
17062 + vmi_rom = *romstart;
17063 return 1;
17064 }
17065 }
17066 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17067
17068 para_fill(pv_irq_ops.safe_halt, Halt);
17069
17070 +#ifdef CONFIG_PAX_KERNEXEC
17071 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17072 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17073 +#endif
17074 +
17075 /*
17076 * Alternative instruction rewriting doesn't happen soon enough
17077 * to convert VMI_IRET to a call instead of a jump; so we have
17078 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17079
17080 void __init vmi_init(void)
17081 {
17082 - if (!vmi_rom)
17083 + if (!vmi_rom.rom_signature)
17084 probe_vmi_rom();
17085 else
17086 - check_vmi_rom(vmi_rom);
17087 + check_vmi_rom(&vmi_rom);
17088
17089 /* In case probing for or validating the ROM failed, basil */
17090 - if (!vmi_rom)
17091 + if (!vmi_rom.rom_signature)
17092 return;
17093
17094 - reserve_top_address(-vmi_rom->virtual_top);
17095 + reserve_top_address(-vmi_rom.virtual_top);
17096
17097 #ifdef CONFIG_X86_IO_APIC
17098 /* This is virtual hardware; timer routing is wired correctly */
17099 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17100 {
17101 unsigned long flags;
17102
17103 - if (!vmi_rom)
17104 + if (!vmi_rom.rom_signature)
17105 return;
17106
17107 local_irq_save(flags);
17108 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S
17109 --- linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17110 +++ linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17111 @@ -26,6 +26,13 @@
17112 #include <asm/page_types.h>
17113 #include <asm/cache.h>
17114 #include <asm/boot.h>
17115 +#include <asm/segment.h>
17116 +
17117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17118 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17119 +#else
17120 +#define __KERNEL_TEXT_OFFSET 0
17121 +#endif
17122
17123 #undef i386 /* in case the preprocessor is a 32bit one */
17124
17125 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17126 #ifdef CONFIG_X86_32
17127 OUTPUT_ARCH(i386)
17128 ENTRY(phys_startup_32)
17129 -jiffies = jiffies_64;
17130 #else
17131 OUTPUT_ARCH(i386:x86-64)
17132 ENTRY(phys_startup_64)
17133 -jiffies_64 = jiffies;
17134 #endif
17135
17136 PHDRS {
17137 text PT_LOAD FLAGS(5); /* R_E */
17138 - data PT_LOAD FLAGS(7); /* RWE */
17139 +#ifdef CONFIG_X86_32
17140 + module PT_LOAD FLAGS(5); /* R_E */
17141 +#endif
17142 +#ifdef CONFIG_XEN
17143 + rodata PT_LOAD FLAGS(5); /* R_E */
17144 +#else
17145 + rodata PT_LOAD FLAGS(4); /* R__ */
17146 +#endif
17147 + data PT_LOAD FLAGS(6); /* RW_ */
17148 #ifdef CONFIG_X86_64
17149 user PT_LOAD FLAGS(5); /* R_E */
17150 +#endif
17151 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17152 #ifdef CONFIG_SMP
17153 percpu PT_LOAD FLAGS(6); /* RW_ */
17154 #endif
17155 + text.init PT_LOAD FLAGS(5); /* R_E */
17156 + text.exit PT_LOAD FLAGS(5); /* R_E */
17157 init PT_LOAD FLAGS(7); /* RWE */
17158 -#endif
17159 note PT_NOTE FLAGS(0); /* ___ */
17160 }
17161
17162 SECTIONS
17163 {
17164 #ifdef CONFIG_X86_32
17165 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17166 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17167 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17168 #else
17169 - . = __START_KERNEL;
17170 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17171 + . = __START_KERNEL;
17172 #endif
17173
17174 /* Text and read-only data */
17175 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17176 - _text = .;
17177 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17178 /* bootstrapping code */
17179 +#ifdef CONFIG_X86_32
17180 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17181 +#else
17182 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17183 +#endif
17184 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17185 + _text = .;
17186 HEAD_TEXT
17187 #ifdef CONFIG_X86_32
17188 . = ALIGN(PAGE_SIZE);
17189 @@ -82,28 +102,71 @@ SECTIONS
17190 IRQENTRY_TEXT
17191 *(.fixup)
17192 *(.gnu.warning)
17193 - /* End of text section */
17194 - _etext = .;
17195 } :text = 0x9090
17196
17197 - NOTES :text :note
17198 + . += __KERNEL_TEXT_OFFSET;
17199 +
17200 +#ifdef CONFIG_X86_32
17201 + . = ALIGN(PAGE_SIZE);
17202 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17203 + *(.vmi.rom)
17204 + } :module
17205 +
17206 + . = ALIGN(PAGE_SIZE);
17207 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17208 +
17209 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17210 + MODULES_EXEC_VADDR = .;
17211 + BYTE(0)
17212 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17213 + . = ALIGN(HPAGE_SIZE);
17214 + MODULES_EXEC_END = . - 1;
17215 +#endif
17216 +
17217 + } :module
17218 +#endif
17219
17220 - EXCEPTION_TABLE(16) :text = 0x9090
17221 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17222 + /* End of text section */
17223 + _etext = . - __KERNEL_TEXT_OFFSET;
17224 + }
17225 +
17226 +#ifdef CONFIG_X86_32
17227 + . = ALIGN(PAGE_SIZE);
17228 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17229 + *(.idt)
17230 + . = ALIGN(PAGE_SIZE);
17231 + *(.empty_zero_page)
17232 + *(.swapper_pg_fixmap)
17233 + *(.swapper_pg_pmd)
17234 + *(.swapper_pg_dir)
17235 + *(.trampoline_pg_dir)
17236 + } :rodata
17237 +#endif
17238 +
17239 + . = ALIGN(PAGE_SIZE);
17240 + NOTES :rodata :note
17241 +
17242 + EXCEPTION_TABLE(16) :rodata
17243
17244 RO_DATA(PAGE_SIZE)
17245
17246 /* Data */
17247 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17248 +
17249 +#ifdef CONFIG_PAX_KERNEXEC
17250 + . = ALIGN(HPAGE_SIZE);
17251 +#else
17252 + . = ALIGN(PAGE_SIZE);
17253 +#endif
17254 +
17255 /* Start of data section */
17256 _sdata = .;
17257
17258 /* init_task */
17259 INIT_TASK_DATA(THREAD_SIZE)
17260
17261 -#ifdef CONFIG_X86_32
17262 - /* 32 bit has nosave before _edata */
17263 NOSAVE_DATA
17264 -#endif
17265
17266 PAGE_ALIGNED_DATA(PAGE_SIZE)
17267
17268 @@ -112,6 +175,8 @@ SECTIONS
17269 DATA_DATA
17270 CONSTRUCTORS
17271
17272 + jiffies = jiffies_64;
17273 +
17274 /* rarely changed data like cpu maps */
17275 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17276
17277 @@ -166,12 +231,6 @@ SECTIONS
17278 }
17279 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17280
17281 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17282 - .jiffies : AT(VLOAD(.jiffies)) {
17283 - *(.jiffies)
17284 - }
17285 - jiffies = VVIRT(.jiffies);
17286 -
17287 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17288 *(.vsyscall_3)
17289 }
17290 @@ -187,12 +246,19 @@ SECTIONS
17291 #endif /* CONFIG_X86_64 */
17292
17293 /* Init code and data - will be freed after init */
17294 - . = ALIGN(PAGE_SIZE);
17295 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17296 + BYTE(0)
17297 +
17298 +#ifdef CONFIG_PAX_KERNEXEC
17299 + . = ALIGN(HPAGE_SIZE);
17300 +#else
17301 + . = ALIGN(PAGE_SIZE);
17302 +#endif
17303 +
17304 __init_begin = .; /* paired with __init_end */
17305 - }
17306 + } :init.begin
17307
17308 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17309 +#ifdef CONFIG_SMP
17310 /*
17311 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17312 * output PHDR, so the next output section - .init.text - should
17313 @@ -201,12 +267,27 @@ SECTIONS
17314 PERCPU_VADDR(0, :percpu)
17315 #endif
17316
17317 - INIT_TEXT_SECTION(PAGE_SIZE)
17318 -#ifdef CONFIG_X86_64
17319 - :init
17320 -#endif
17321 + . = ALIGN(PAGE_SIZE);
17322 + init_begin = .;
17323 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17324 + VMLINUX_SYMBOL(_sinittext) = .;
17325 + INIT_TEXT
17326 + VMLINUX_SYMBOL(_einittext) = .;
17327 + . = ALIGN(PAGE_SIZE);
17328 + } :text.init
17329
17330 - INIT_DATA_SECTION(16)
17331 + /*
17332 + * .exit.text is discard at runtime, not link time, to deal with
17333 + * references from .altinstructions and .eh_frame
17334 + */
17335 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17336 + EXIT_TEXT
17337 + . = ALIGN(16);
17338 + } :text.exit
17339 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17340 +
17341 + . = ALIGN(PAGE_SIZE);
17342 + INIT_DATA_SECTION(16) :init
17343
17344 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17345 __x86_cpu_dev_start = .;
17346 @@ -232,19 +313,11 @@ SECTIONS
17347 *(.altinstr_replacement)
17348 }
17349
17350 - /*
17351 - * .exit.text is discard at runtime, not link time, to deal with
17352 - * references from .altinstructions and .eh_frame
17353 - */
17354 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17355 - EXIT_TEXT
17356 - }
17357 -
17358 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17359 EXIT_DATA
17360 }
17361
17362 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17363 +#ifndef CONFIG_SMP
17364 PERCPU(PAGE_SIZE)
17365 #endif
17366
17367 @@ -267,12 +340,6 @@ SECTIONS
17368 . = ALIGN(PAGE_SIZE);
17369 }
17370
17371 -#ifdef CONFIG_X86_64
17372 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17373 - NOSAVE_DATA
17374 - }
17375 -#endif
17376 -
17377 /* BSS */
17378 . = ALIGN(PAGE_SIZE);
17379 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17380 @@ -288,6 +355,7 @@ SECTIONS
17381 __brk_base = .;
17382 . += 64 * 1024; /* 64k alignment slop space */
17383 *(.brk_reservation) /* areas brk users have reserved */
17384 + . = ALIGN(HPAGE_SIZE);
17385 __brk_limit = .;
17386 }
17387
17388 @@ -316,13 +384,12 @@ SECTIONS
17389 * for the boot processor.
17390 */
17391 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17392 -INIT_PER_CPU(gdt_page);
17393 INIT_PER_CPU(irq_stack_union);
17394
17395 /*
17396 * Build-time check on the image size:
17397 */
17398 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17399 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17400 "kernel image bigger than KERNEL_IMAGE_SIZE");
17401
17402 #ifdef CONFIG_SMP
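
The .module.text hunk above carves out a fixed executable window for modules when both PAX_KERNEXEC and CONFIG_MODULES are set: BYTE(0) keeps the output section from being empty, CONFIG_PAX_KERNEXEC_MODULE_TEXT megabytes are reserved, and the end is rounded up to a huge page before MODULES_EXEC_END is taken. A rough C restatement of the resulting window (the helper and its name are illustrative; only the two symbols come from the script):

extern char MODULES_EXEC_VADDR[], MODULES_EXEC_END[];   /* defined in the linker script above */

/* Hypothetical helper: does addr fall inside the reserved module-text
 * window?  The window spans CONFIG_PAX_KERNEXEC_MODULE_TEXT MiB, rounded
 * up to HPAGE_SIZE by the ALIGN() in the script; MODULES_EXEC_END is the
 * last byte (". - 1"), hence the inclusive comparison. */
static inline int in_module_exec_window(unsigned long addr)
{
        return addr >= (unsigned long)MODULES_EXEC_VADDR &&
               addr <= (unsigned long)MODULES_EXEC_END;
}
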
17403 diff -urNp linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c
17404 --- linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17405 +++ linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17406 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17407
17408 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17409 /* copy vsyscall data */
17410 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17411 vsyscall_gtod_data.clock.vread = clock->vread;
17412 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17413 vsyscall_gtod_data.clock.mask = clock->mask;
17414 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17415 We do this here because otherwise user space would do it on
17416 its own in a likely inferior way (no access to jiffies).
17417 If you don't like it pass NULL. */
17418 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17419 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17420 p = tcache->blob[1];
17421 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17422 /* Load per CPU data from RDTSCP */
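
The one-line addition to update_vsyscall() above snapshots the clocksource name into the fixed-size name field of vsyscall_gtod_data using a bounded, always-NUL-terminated copy. A minimal stand-alone equivalent of that kind of copy (struct and field size here are assumptions, not the kernel's types):

#include <stdio.h>
#include <string.h>

struct clock_name_sketch {
        char name[32];                  /* assumed size */
};

static void snapshot_name(struct clock_name_sketch *dst, const char *src)
{
        /* strlcpy-style: truncates if needed, always NUL-terminates */
        snprintf(dst->name, sizeof(dst->name), "%s", src);
}

int main(void)
{
        struct clock_name_sketch c;

        snapshot_name(&c, "tsc");
        printf("%s\n", c.name);
        return 0;
}
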
17423 diff -urNp linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c
17424 --- linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17425 +++ linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17426 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17427
17428 EXPORT_SYMBOL(copy_user_generic);
17429 EXPORT_SYMBOL(__copy_user_nocache);
17430 -EXPORT_SYMBOL(copy_from_user);
17431 -EXPORT_SYMBOL(copy_to_user);
17432 EXPORT_SYMBOL(__copy_from_user_inatomic);
17433
17434 EXPORT_SYMBOL(copy_page);
17435 diff -urNp linux-2.6.32.41/arch/x86/kernel/xsave.c linux-2.6.32.41/arch/x86/kernel/xsave.c
17436 --- linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17437 +++ linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17438 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17439 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17440 return -1;
17441
17442 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17443 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17444 fx_sw_user->extended_size -
17445 FP_XSTATE_MAGIC2_SIZE));
17446 /*
17447 @@ -196,7 +196,7 @@ fx_only:
17448 * the other extended state.
17449 */
17450 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17451 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17452 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17453 }
17454
17455 /*
17456 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17457 if (task_thread_info(tsk)->status & TS_XSAVE)
17458 err = restore_user_xstate(buf);
17459 else
17460 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17461 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17462 buf);
17463 if (unlikely(err)) {
17464 /*
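
The xsave.c hunks above are sparse address-space annotation fixes rather than behaviour changes: the buffers really are userland pointers, so the casts keep the __user qualifier instead of discarding it with __force. For reference, this is how sparse sees the annotation when __CHECKER__ is defined (the two-line definition mirrors linux/compiler.h; the prototype is only an illustration):

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* With the annotation in place, sparse warns if the pointer is dereferenced
 * directly or mixed with plain kernel pointers without an explicit cast. */
int check_for_xstate_sketch(const void __user *fpstate, unsigned int size);
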
17465 diff -urNp linux-2.6.32.41/arch/x86/kvm/emulate.c linux-2.6.32.41/arch/x86/kvm/emulate.c
17466 --- linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17467 +++ linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17468 @@ -81,8 +81,8 @@
17469 #define Src2CL (1<<29)
17470 #define Src2ImmByte (2<<29)
17471 #define Src2One (3<<29)
17472 -#define Src2Imm16 (4<<29)
17473 -#define Src2Mask (7<<29)
17474 +#define Src2Imm16 (4U<<29)
17475 +#define Src2Mask (7U<<29)
17476
17477 enum {
17478 Group1_80, Group1_81, Group1_82, Group1_83,
17479 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17480
17481 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17482 do { \
17483 + unsigned long _tmp; \
17484 __asm__ __volatile__ ( \
17485 _PRE_EFLAGS("0", "4", "2") \
17486 _op _suffix " %"_x"3,%1; " \
17487 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17488 /* Raw emulation: instruction has two explicit operands. */
17489 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17490 do { \
17491 - unsigned long _tmp; \
17492 - \
17493 switch ((_dst).bytes) { \
17494 case 2: \
17495 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17496 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17497
17498 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17499 do { \
17500 - unsigned long _tmp; \
17501 switch ((_dst).bytes) { \
17502 case 1: \
17503 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
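
Two separate cleanups are folded into the emulate.c hunk: the scratch variable _tmp moves into ____emulate_2op() so every expansion declares its own copy, and Src2Imm16/Src2Mask gain a U suffix because shifting a plain int by 29 runs into the sign bit. A quick stand-alone check of the resulting mask values (illustrative only):

#include <stdio.h>

int main(void)
{
        /* 7 << 29 does not fit in a 32-bit signed int; written with the U
         * suffix both constants are well-defined unsigned values. */
        printf("Src2Imm16 = %#x\n", 4U << 29);   /* 0x80000000 */
        printf("Src2Mask  = %#x\n", 7U << 29);   /* 0xe0000000 */
        return 0;
}
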
17504 diff -urNp linux-2.6.32.41/arch/x86/kvm/lapic.c linux-2.6.32.41/arch/x86/kvm/lapic.c
17505 --- linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17506 +++ linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17507 @@ -52,7 +52,7 @@
17508 #define APIC_BUS_CYCLE_NS 1
17509
17510 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17511 -#define apic_debug(fmt, arg...)
17512 +#define apic_debug(fmt, arg...) do {} while (0)
17513
17514 #define APIC_LVT_NUM 6
17515 /* 14 is the version for Xeon and Pentium 8.4.8*/
17516 diff -urNp linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h
17517 --- linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17518 +++ linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17519 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17520 int level = PT_PAGE_TABLE_LEVEL;
17521 unsigned long mmu_seq;
17522
17523 + pax_track_stack();
17524 +
17525 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17526 kvm_mmu_audit(vcpu, "pre page fault");
17527
17528 diff -urNp linux-2.6.32.41/arch/x86/kvm/svm.c linux-2.6.32.41/arch/x86/kvm/svm.c
17529 --- linux-2.6.32.41/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17530 +++ linux-2.6.32.41/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17531 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17532 static void reload_tss(struct kvm_vcpu *vcpu)
17533 {
17534 int cpu = raw_smp_processor_id();
17535 -
17536 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17537 +
17538 + pax_open_kernel();
17539 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17540 + pax_close_kernel();
17541 +
17542 load_TR_desc();
17543 }
17544
17545 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17546 return true;
17547 }
17548
17549 -static struct kvm_x86_ops svm_x86_ops = {
17550 +static const struct kvm_x86_ops svm_x86_ops = {
17551 .cpu_has_kvm_support = has_svm,
17552 .disabled_by_bios = is_disabled,
17553 .hardware_setup = svm_hardware_setup,
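
The TSS descriptor that reload_tss() updates lives in the GDT, which this patch makes read-only, so the single store is bracketed with pax_open_kernel()/pax_close_kernel() (on this kernel the pair essentially toggles CR0.WP). The shape of that pairing as a sketch; the struct below is a stand-in, not the kernel's descriptor type, and the open/close helpers come from the PaX pgtable.h changes elsewhere in this patch:

struct tss_desc_sketch {                /* stand-in for the real descriptor */
        unsigned type : 4;
        unsigned dpl  : 2;
        unsigned p    : 1;
};

static void mark_tss_available(struct tss_desc_sketch *d)
{
        pax_open_kernel();              /* lift write protection briefly   */
        d->type = 9;                    /* available 32/64-bit TSS         */
        pax_close_kernel();             /* re-arm it before anything else  */
}
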
17554 diff -urNp linux-2.6.32.41/arch/x86/kvm/vmx.c linux-2.6.32.41/arch/x86/kvm/vmx.c
17555 --- linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17556 +++ linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17557 @@ -570,7 +570,11 @@ static void reload_tss(void)
17558
17559 kvm_get_gdt(&gdt);
17560 descs = (void *)gdt.base;
17561 +
17562 + pax_open_kernel();
17563 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17564 + pax_close_kernel();
17565 +
17566 load_TR_desc();
17567 }
17568
17569 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17570 if (!cpu_has_vmx_flexpriority())
17571 flexpriority_enabled = 0;
17572
17573 - if (!cpu_has_vmx_tpr_shadow())
17574 - kvm_x86_ops->update_cr8_intercept = NULL;
17575 + if (!cpu_has_vmx_tpr_shadow()) {
17576 + pax_open_kernel();
17577 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17578 + pax_close_kernel();
17579 + }
17580
17581 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17582 kvm_disable_largepages();
17583 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17584 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17585
17586 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17587 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17588 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17589 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17590 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17591 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17592 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17593 "jmp .Lkvm_vmx_return \n\t"
17594 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17595 ".Lkvm_vmx_return: "
17596 +
17597 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17598 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17599 + ".Lkvm_vmx_return2: "
17600 +#endif
17601 +
17602 /* Save guest registers, load host registers, keep flags */
17603 "xchg %0, (%%"R"sp) \n\t"
17604 "mov %%"R"ax, %c[rax](%0) \n\t"
17605 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17606 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17607 #endif
17608 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17609 +
17610 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17611 + ,[cs]"i"(__KERNEL_CS)
17612 +#endif
17613 +
17614 : "cc", "memory"
17615 - , R"bx", R"di", R"si"
17616 + , R"ax", R"bx", R"di", R"si"
17617 #ifdef CONFIG_X86_64
17618 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17619 #endif
17620 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17621 if (vmx->rmode.irq.pending)
17622 fixup_rmode_irq(vmx);
17623
17624 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17625 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17626 +
17627 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17628 + loadsegment(fs, __KERNEL_PERCPU);
17629 +#endif
17630 +
17631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17632 + __set_fs(current_thread_info()->addr_limit);
17633 +#endif
17634 +
17635 vmx->launched = 1;
17636
17637 vmx_complete_interrupts(vmx);
17638 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17639 return false;
17640 }
17641
17642 -static struct kvm_x86_ops vmx_x86_ops = {
17643 +static const struct kvm_x86_ops vmx_x86_ops = {
17644 .cpu_has_kvm_support = cpu_has_kvm_support,
17645 .disabled_by_bios = vmx_disabled_by_bios,
17646 .hardware_setup = hardware_setup,
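
Both svm_x86_ops and vmx_x86_ops become const above, which places the function-pointer tables in memory the patch enforces as read-only; the one spot that legitimately rewrites an entry at runtime (clearing update_cr8_intercept when the CPU lacks a TPR shadow) therefore casts the field address and wraps the store in pax_open_kernel()/pax_close_kernel(). A compact sketch of that pattern (names below are stand-ins):

struct ops_sketch {
        void (*update_cr8_intercept)(int cpu);
        /* ... further hooks elided ... */
};

static const struct ops_sketch ops_table = { 0 };   /* normally never written */

static void drop_cr8_hook(void)
{
        pax_open_kernel();                           /* page made writable  */
        *(void **)&ops_table.update_cr8_intercept = NULL;
        pax_close_kernel();
}
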
17647 diff -urNp linux-2.6.32.41/arch/x86/kvm/x86.c linux-2.6.32.41/arch/x86/kvm/x86.c
17648 --- linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17649 +++ linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17650 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17651 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17652 struct kvm_cpuid_entry2 __user *entries);
17653
17654 -struct kvm_x86_ops *kvm_x86_ops;
17655 +const struct kvm_x86_ops *kvm_x86_ops;
17656 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17657
17658 int ignore_msrs = 0;
17659 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17660 struct kvm_cpuid2 *cpuid,
17661 struct kvm_cpuid_entry2 __user *entries)
17662 {
17663 - int r;
17664 + int r, i;
17665
17666 r = -E2BIG;
17667 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17668 goto out;
17669 r = -EFAULT;
17670 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17671 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17672 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17673 goto out;
17674 + for (i = 0; i < cpuid->nent; ++i) {
17675 + struct kvm_cpuid_entry2 cpuid_entry;
17676 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17677 + goto out;
17678 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17679 + }
17680 vcpu->arch.cpuid_nent = cpuid->nent;
17681 kvm_apic_set_version(vcpu);
17682 return 0;
17683 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17684 struct kvm_cpuid2 *cpuid,
17685 struct kvm_cpuid_entry2 __user *entries)
17686 {
17687 - int r;
17688 + int r, i;
17689
17690 vcpu_load(vcpu);
17691 r = -E2BIG;
17692 if (cpuid->nent < vcpu->arch.cpuid_nent)
17693 goto out;
17694 r = -EFAULT;
17695 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17696 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17697 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17698 goto out;
17699 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17700 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17701 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17702 + goto out;
17703 + }
17704 return 0;
17705
17706 out:
17707 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17708 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17709 struct kvm_interrupt *irq)
17710 {
17711 - if (irq->irq < 0 || irq->irq >= 256)
17712 + if (irq->irq >= 256)
17713 return -EINVAL;
17714 if (irqchip_in_kernel(vcpu->kvm))
17715 return -ENXIO;
17716 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17717 .notifier_call = kvmclock_cpufreq_notifier
17718 };
17719
17720 -int kvm_arch_init(void *opaque)
17721 +int kvm_arch_init(const void *opaque)
17722 {
17723 int r, cpu;
17724 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17725 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17726
17727 if (kvm_x86_ops) {
17728 printk(KERN_ERR "kvm: already loaded the other module\n");
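
The two cpuid ioctl helpers above stop moving the whole array with a single copy_from_user()/copy_to_user() and instead validate the user range once with access_ok(), then copy one entry at a time through a bounce variable. A minimal sketch of the read side of that pattern (struct and function names are placeholders; in-kernel it relies on linux/uaccess.h):

#include <linux/errno.h>
#include <linux/uaccess.h>              /* access_ok(), __copy_from_user() */

struct entry_sketch { unsigned int a, b, c, d; };   /* stand-in for kvm_cpuid_entry2 */

static int copy_entries_in(struct entry_sketch *dst,
                           const struct entry_sketch __user *src,
                           unsigned int nent)
{
        unsigned int i;

        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
                return -EFAULT;

        for (i = 0; i < nent; i++) {
                struct entry_sketch tmp;

                if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                        return -EFAULT;
                dst[i] = tmp;           /* commit only fully-copied entries */
        }
        return 0;
}
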
17729 diff -urNp linux-2.6.32.41/arch/x86/lib/atomic64_32.c linux-2.6.32.41/arch/x86/lib/atomic64_32.c
17730 --- linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17731 +++ linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17732 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17733 }
17734 EXPORT_SYMBOL(atomic64_cmpxchg);
17735
17736 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17737 +{
17738 + return cmpxchg8b(&ptr->counter, old_val, new_val);
17739 +}
17740 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17741 +
17742 /**
17743 * atomic64_xchg - xchg atomic64 variable
17744 * @ptr: pointer to type atomic64_t
17745 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17746 EXPORT_SYMBOL(atomic64_xchg);
17747
17748 /**
17749 + * atomic64_xchg_unchecked - xchg atomic64 variable
17750 + * @ptr: pointer to type atomic64_unchecked_t
17751 + * @new_val: value to assign
17752 + *
17753 + * Atomically xchgs the value of @ptr to @new_val and returns
17754 + * the old value.
17755 + */
17756 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17757 +{
17758 + /*
17759 + * Try first with a (possibly incorrect) assumption about
17760 + * what we have there. We'll do two loops most likely,
17761 + * but we'll get an ownership MESI transaction straight away
17762 + * instead of a read transaction followed by a
17763 + * flush-for-ownership transaction:
17764 + */
17765 + u64 old_val, real_val = 0;
17766 +
17767 + do {
17768 + old_val = real_val;
17769 +
17770 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17771 +
17772 + } while (real_val != old_val);
17773 +
17774 + return old_val;
17775 +}
17776 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
17777 +
17778 +/**
17779 * atomic64_set - set atomic64 variable
17780 * @ptr: pointer to type atomic64_t
17781 * @new_val: value to assign
17782 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17783 EXPORT_SYMBOL(atomic64_set);
17784
17785 /**
17786 -EXPORT_SYMBOL(atomic64_read);
17787 + * atomic64_set_unchecked - set atomic64 variable
17788 + * @ptr: pointer to type atomic64_unchecked_t
17789 + * @new_val: value to assign
17790 + *
17791 + * Atomically sets the value of @ptr to @new_val.
17792 + */
17793 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17794 +{
17795 + atomic64_xchg_unchecked(ptr, new_val);
17796 +}
17797 +EXPORT_SYMBOL(atomic64_set_unchecked);
17798 +
17799 +/**
17800 * atomic64_add_return - add and return
17801 * @delta: integer value to add
17802 * @ptr: pointer to type atomic64_t
17803 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17804 }
17805 EXPORT_SYMBOL(atomic64_add_return);
17806
17807 +/**
17808 + * atomic64_add_return_unchecked - add and return
17809 + * @delta: integer value to add
17810 + * @ptr: pointer to type atomic64_unchecked_t
17811 + *
17812 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
17813 + */
17814 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17815 +{
17816 + /*
17817 + * Try first with a (possibly incorrect) assumption about
17818 + * what we have there. We'll do two loops most likely,
17819 + * but we'll get an ownership MESI transaction straight away
17820 + * instead of a read transaction followed by a
17821 + * flush-for-ownership transaction:
17822 + */
17823 + u64 old_val, new_val, real_val = 0;
17824 +
17825 + do {
17826 + old_val = real_val;
17827 + new_val = old_val + delta;
17828 +
17829 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17830 +
17831 + } while (real_val != old_val);
17832 +
17833 + return new_val;
17834 +}
17835 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
17836 +
17837 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17838 {
17839 return atomic64_add_return(-delta, ptr);
17840 }
17841 EXPORT_SYMBOL(atomic64_sub_return);
17842
17843 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17844 +{
17845 + return atomic64_add_return_unchecked(-delta, ptr);
17846 +}
17847 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17848 +
17849 u64 atomic64_inc_return(atomic64_t *ptr)
17850 {
17851 return atomic64_add_return(1, ptr);
17852 }
17853 EXPORT_SYMBOL(atomic64_inc_return);
17854
17855 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17856 +{
17857 + return atomic64_add_return_unchecked(1, ptr);
17858 +}
17859 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17860 +
17861 u64 atomic64_dec_return(atomic64_t *ptr)
17862 {
17863 return atomic64_sub_return(1, ptr);
17864 }
17865 EXPORT_SYMBOL(atomic64_dec_return);
17866
17867 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
17868 +{
17869 + return atomic64_sub_return_unchecked(1, ptr);
17870 +}
17871 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
17872 +
17873 /**
17874 * atomic64_add - add integer to atomic64 variable
17875 * @delta: integer value to add
17876 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
17877 EXPORT_SYMBOL(atomic64_add);
17878
17879 /**
17880 + * atomic64_add_unchecked - add integer to atomic64 variable
17881 + * @delta: integer value to add
17882 + * @ptr: pointer to type atomic64_unchecked_t
17883 + *
17884 + * Atomically adds @delta to @ptr.
17885 + */
17886 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17887 +{
17888 + atomic64_add_return_unchecked(delta, ptr);
17889 +}
17890 +EXPORT_SYMBOL(atomic64_add_unchecked);
17891 +
17892 +/**
17893 * atomic64_sub - subtract the atomic64 variable
17894 * @delta: integer value to subtract
17895 * @ptr: pointer to type atomic64_t
17896 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
17897 EXPORT_SYMBOL(atomic64_sub);
17898
17899 /**
17900 + * atomic64_sub_unchecked - subtract the atomic64 variable
17901 + * @delta: integer value to subtract
17902 + * @ptr: pointer to type atomic64_unchecked_t
17903 + *
17904 + * Atomically subtracts @delta from @ptr.
17905 + */
17906 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17907 +{
17908 + atomic64_add_unchecked(-delta, ptr);
17909 +}
17910 +EXPORT_SYMBOL(atomic64_sub_unchecked);
17911 +
17912 +/**
17913 * atomic64_sub_and_test - subtract value from variable and test result
17914 * @delta: integer value to subtract
17915 * @ptr: pointer to type atomic64_t
17916 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
17917 EXPORT_SYMBOL(atomic64_inc);
17918
17919 /**
17920 + * atomic64_inc_unchecked - increment atomic64 variable
17921 + * @ptr: pointer to type atomic64_unchecked_t
17922 + *
17923 + * Atomically increments @ptr by 1.
17924 + */
17925 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
17926 +{
17927 + atomic64_add_unchecked(1, ptr);
17928 +}
17929 +EXPORT_SYMBOL(atomic64_inc_unchecked);
17930 +
17931 +/**
17932 * atomic64_dec - decrement atomic64 variable
17933 * @ptr: pointer to type atomic64_t
17934 *
17935 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
17936 EXPORT_SYMBOL(atomic64_dec);
17937
17938 /**
17939 + * atomic64_dec_unchecked - decrement atomic64 variable
17940 + * @ptr: pointer to type atomic64_unchecked_t
17941 + *
17942 + * Atomically decrements @ptr by 1.
17943 + */
17944 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
17945 +{
17946 + atomic64_sub_unchecked(1, ptr);
17947 +}
17948 +EXPORT_SYMBOL(atomic64_dec_unchecked);
17949 +
17950 +/**
17951 * atomic64_dec_and_test - decrement and test
17952 * @ptr: pointer to type atomic64_t
17953 *
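
All of the *_unchecked additions above reuse the one technique the copied-in comments describe: guess the old value, attempt the cmpxchg8b, and loop until the guess was right, so the very first bus transaction already requests ownership of the cache line. The same loop in portable C, using a GCC atomic builtin in place of cmpxchg8b (the function name and builtin choice are mine, not the patch's):

#include <stdint.h>

static uint64_t add_return_sketch(uint64_t delta, volatile uint64_t *ptr)
{
        uint64_t guess = 0, seen, next;

        do {
                seen = guess;                   /* assume *ptr == seen      */
                next = seen + delta;            /* value we want to install */
                guess = __sync_val_compare_and_swap(ptr, seen, next);
        } while (guess != seen);                /* wrong guess: retry       */

        return next;                            /* as atomic64_add_return_unchecked does */
}

atomic64_xchg_unchecked above is the same loop with the new value fixed, and the inc/dec/sub variants are thin wrappers over the add/xchg primitives.
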
17954 diff -urNp linux-2.6.32.41/arch/x86/lib/checksum_32.S linux-2.6.32.41/arch/x86/lib/checksum_32.S
17955 --- linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
17956 +++ linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
17957 @@ -28,7 +28,8 @@
17958 #include <linux/linkage.h>
17959 #include <asm/dwarf2.h>
17960 #include <asm/errno.h>
17961 -
17962 +#include <asm/segment.h>
17963 +
17964 /*
17965 * computes a partial checksum, e.g. for TCP/UDP fragments
17966 */
17967 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
17968
17969 #define ARGBASE 16
17970 #define FP 12
17971 -
17972 -ENTRY(csum_partial_copy_generic)
17973 +
17974 +ENTRY(csum_partial_copy_generic_to_user)
17975 CFI_STARTPROC
17976 +
17977 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17978 + pushl %gs
17979 + CFI_ADJUST_CFA_OFFSET 4
17980 + popl %es
17981 + CFI_ADJUST_CFA_OFFSET -4
17982 + jmp csum_partial_copy_generic
17983 +#endif
17984 +
17985 +ENTRY(csum_partial_copy_generic_from_user)
17986 +
17987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17988 + pushl %gs
17989 + CFI_ADJUST_CFA_OFFSET 4
17990 + popl %ds
17991 + CFI_ADJUST_CFA_OFFSET -4
17992 +#endif
17993 +
17994 +ENTRY(csum_partial_copy_generic)
17995 subl $4,%esp
17996 CFI_ADJUST_CFA_OFFSET 4
17997 pushl %edi
17998 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
17999 jmp 4f
18000 SRC(1: movw (%esi), %bx )
18001 addl $2, %esi
18002 -DST( movw %bx, (%edi) )
18003 +DST( movw %bx, %es:(%edi) )
18004 addl $2, %edi
18005 addw %bx, %ax
18006 adcl $0, %eax
18007 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18008 SRC(1: movl (%esi), %ebx )
18009 SRC( movl 4(%esi), %edx )
18010 adcl %ebx, %eax
18011 -DST( movl %ebx, (%edi) )
18012 +DST( movl %ebx, %es:(%edi) )
18013 adcl %edx, %eax
18014 -DST( movl %edx, 4(%edi) )
18015 +DST( movl %edx, %es:4(%edi) )
18016
18017 SRC( movl 8(%esi), %ebx )
18018 SRC( movl 12(%esi), %edx )
18019 adcl %ebx, %eax
18020 -DST( movl %ebx, 8(%edi) )
18021 +DST( movl %ebx, %es:8(%edi) )
18022 adcl %edx, %eax
18023 -DST( movl %edx, 12(%edi) )
18024 +DST( movl %edx, %es:12(%edi) )
18025
18026 SRC( movl 16(%esi), %ebx )
18027 SRC( movl 20(%esi), %edx )
18028 adcl %ebx, %eax
18029 -DST( movl %ebx, 16(%edi) )
18030 +DST( movl %ebx, %es:16(%edi) )
18031 adcl %edx, %eax
18032 -DST( movl %edx, 20(%edi) )
18033 +DST( movl %edx, %es:20(%edi) )
18034
18035 SRC( movl 24(%esi), %ebx )
18036 SRC( movl 28(%esi), %edx )
18037 adcl %ebx, %eax
18038 -DST( movl %ebx, 24(%edi) )
18039 +DST( movl %ebx, %es:24(%edi) )
18040 adcl %edx, %eax
18041 -DST( movl %edx, 28(%edi) )
18042 +DST( movl %edx, %es:28(%edi) )
18043
18044 lea 32(%esi), %esi
18045 lea 32(%edi), %edi
18046 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18047 shrl $2, %edx # This clears CF
18048 SRC(3: movl (%esi), %ebx )
18049 adcl %ebx, %eax
18050 -DST( movl %ebx, (%edi) )
18051 +DST( movl %ebx, %es:(%edi) )
18052 lea 4(%esi), %esi
18053 lea 4(%edi), %edi
18054 dec %edx
18055 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18056 jb 5f
18057 SRC( movw (%esi), %cx )
18058 leal 2(%esi), %esi
18059 -DST( movw %cx, (%edi) )
18060 +DST( movw %cx, %es:(%edi) )
18061 leal 2(%edi), %edi
18062 je 6f
18063 shll $16,%ecx
18064 SRC(5: movb (%esi), %cl )
18065 -DST( movb %cl, (%edi) )
18066 +DST( movb %cl, %es:(%edi) )
18067 6: addl %ecx, %eax
18068 adcl $0, %eax
18069 7:
18070 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18071
18072 6001:
18073 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18074 - movl $-EFAULT, (%ebx)
18075 + movl $-EFAULT, %ss:(%ebx)
18076
18077 # zero the complete destination - computing the rest
18078 # is too much work
18079 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18080
18081 6002:
18082 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18083 - movl $-EFAULT,(%ebx)
18084 + movl $-EFAULT,%ss:(%ebx)
18085 jmp 5000b
18086
18087 .previous
18088
18089 + pushl %ss
18090 + CFI_ADJUST_CFA_OFFSET 4
18091 + popl %ds
18092 + CFI_ADJUST_CFA_OFFSET -4
18093 + pushl %ss
18094 + CFI_ADJUST_CFA_OFFSET 4
18095 + popl %es
18096 + CFI_ADJUST_CFA_OFFSET -4
18097 popl %ebx
18098 CFI_ADJUST_CFA_OFFSET -4
18099 CFI_RESTORE ebx
18100 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18101 CFI_ADJUST_CFA_OFFSET -4
18102 ret
18103 CFI_ENDPROC
18104 -ENDPROC(csum_partial_copy_generic)
18105 +ENDPROC(csum_partial_copy_generic_to_user)
18106
18107 #else
18108
18109 /* Version for PentiumII/PPro */
18110
18111 #define ROUND1(x) \
18112 + nop; nop; nop; \
18113 SRC(movl x(%esi), %ebx ) ; \
18114 addl %ebx, %eax ; \
18115 - DST(movl %ebx, x(%edi) ) ;
18116 + DST(movl %ebx, %es:x(%edi)) ;
18117
18118 #define ROUND(x) \
18119 + nop; nop; nop; \
18120 SRC(movl x(%esi), %ebx ) ; \
18121 adcl %ebx, %eax ; \
18122 - DST(movl %ebx, x(%edi) ) ;
18123 + DST(movl %ebx, %es:x(%edi)) ;
18124
18125 #define ARGBASE 12
18126 -
18127 -ENTRY(csum_partial_copy_generic)
18128 +
18129 +ENTRY(csum_partial_copy_generic_to_user)
18130 CFI_STARTPROC
18131 +
18132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18133 + pushl %gs
18134 + CFI_ADJUST_CFA_OFFSET 4
18135 + popl %es
18136 + CFI_ADJUST_CFA_OFFSET -4
18137 + jmp csum_partial_copy_generic
18138 +#endif
18139 +
18140 +ENTRY(csum_partial_copy_generic_from_user)
18141 +
18142 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18143 + pushl %gs
18144 + CFI_ADJUST_CFA_OFFSET 4
18145 + popl %ds
18146 + CFI_ADJUST_CFA_OFFSET -4
18147 +#endif
18148 +
18149 +ENTRY(csum_partial_copy_generic)
18150 pushl %ebx
18151 CFI_ADJUST_CFA_OFFSET 4
18152 CFI_REL_OFFSET ebx, 0
18153 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18154 subl %ebx, %edi
18155 lea -1(%esi),%edx
18156 andl $-32,%edx
18157 - lea 3f(%ebx,%ebx), %ebx
18158 + lea 3f(%ebx,%ebx,2), %ebx
18159 testl %esi, %esi
18160 jmp *%ebx
18161 1: addl $64,%esi
18162 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18163 jb 5f
18164 SRC( movw (%esi), %dx )
18165 leal 2(%esi), %esi
18166 -DST( movw %dx, (%edi) )
18167 +DST( movw %dx, %es:(%edi) )
18168 leal 2(%edi), %edi
18169 je 6f
18170 shll $16,%edx
18171 5:
18172 SRC( movb (%esi), %dl )
18173 -DST( movb %dl, (%edi) )
18174 +DST( movb %dl, %es:(%edi) )
18175 6: addl %edx, %eax
18176 adcl $0, %eax
18177 7:
18178 .section .fixup, "ax"
18179 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18180 - movl $-EFAULT, (%ebx)
18181 + movl $-EFAULT, %ss:(%ebx)
18182 # zero the complete destination (computing the rest is too much work)
18183 movl ARGBASE+8(%esp),%edi # dst
18184 movl ARGBASE+12(%esp),%ecx # len
18185 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18186 rep; stosb
18187 jmp 7b
18188 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18189 - movl $-EFAULT, (%ebx)
18190 + movl $-EFAULT, %ss:(%ebx)
18191 jmp 7b
18192 .previous
18193
18194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18195 + pushl %ss
18196 + CFI_ADJUST_CFA_OFFSET 4
18197 + popl %ds
18198 + CFI_ADJUST_CFA_OFFSET -4
18199 + pushl %ss
18200 + CFI_ADJUST_CFA_OFFSET 4
18201 + popl %es
18202 + CFI_ADJUST_CFA_OFFSET -4
18203 +#endif
18204 +
18205 popl %esi
18206 CFI_ADJUST_CFA_OFFSET -4
18207 CFI_RESTORE esi
18208 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18209 CFI_RESTORE ebx
18210 ret
18211 CFI_ENDPROC
18212 -ENDPROC(csum_partial_copy_generic)
18213 +ENDPROC(csum_partial_copy_generic_to_user)
18214
18215 #undef ROUND
18216 #undef ROUND1
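
The checksum_32.S rework splits the old single entry point into csum_partial_copy_generic_to_user and csum_partial_copy_generic_from_user: under PAX_MEMORY_UDEREF the first loads %es and the second loads %ds with the user segment taken from %gs, and the DST()/SRC() accesses carry explicit segment overrides accordingly. How a C caller would choose between them, as a sketch (the prototypes are assumed to mirror csum_partial_copy_generic; the wrapper is illustrative):

extern unsigned int csum_partial_copy_generic_to_user(const void *src,
                void *dst, int len, unsigned int sum,
                int *src_err, int *dst_err);
extern unsigned int csum_partial_copy_generic_from_user(const void *src,
                void *dst, int len, unsigned int sum,
                int *src_err, int *dst_err);

static unsigned int csum_copy_sketch(const void *src, void *dst, int len,
                                     unsigned int sum, int *err, int to_user)
{
        /* only the destination (to_user) or the source (from_user) is a
         * userland pointer, so only that side needs the user segment */
        return to_user
             ? csum_partial_copy_generic_to_user(src, dst, len, sum, NULL, err)
             : csum_partial_copy_generic_from_user(src, dst, len, sum, err, NULL);
}
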
18217 diff -urNp linux-2.6.32.41/arch/x86/lib/clear_page_64.S linux-2.6.32.41/arch/x86/lib/clear_page_64.S
18218 --- linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18219 +++ linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18220 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18221
18222 #include <asm/cpufeature.h>
18223
18224 - .section .altinstr_replacement,"ax"
18225 + .section .altinstr_replacement,"a"
18226 1: .byte 0xeb /* jmp <disp8> */
18227 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18228 2:
18229 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_page_64.S linux-2.6.32.41/arch/x86/lib/copy_page_64.S
18230 --- linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18231 +++ linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18232 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18233
18234 #include <asm/cpufeature.h>
18235
18236 - .section .altinstr_replacement,"ax"
18237 + .section .altinstr_replacement,"a"
18238 1: .byte 0xeb /* jmp <disp8> */
18239 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18240 2:
18241 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_64.S linux-2.6.32.41/arch/x86/lib/copy_user_64.S
18242 --- linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-03-27 14:31:47.000000000 -0400
18243 +++ linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-04-17 15:56:46.000000000 -0400
18244 @@ -15,13 +15,14 @@
18245 #include <asm/asm-offsets.h>
18246 #include <asm/thread_info.h>
18247 #include <asm/cpufeature.h>
18248 +#include <asm/pgtable.h>
18249
18250 .macro ALTERNATIVE_JUMP feature,orig,alt
18251 0:
18252 .byte 0xe9 /* 32bit jump */
18253 .long \orig-1f /* by default jump to orig */
18254 1:
18255 - .section .altinstr_replacement,"ax"
18256 + .section .altinstr_replacement,"a"
18257 2: .byte 0xe9 /* near jump with 32bit immediate */
18258 .long \alt-1b /* offset */ /* or alternatively to alt */
18259 .previous
18260 @@ -64,49 +65,19 @@
18261 #endif
18262 .endm
18263
18264 -/* Standard copy_to_user with segment limit checking */
18265 -ENTRY(copy_to_user)
18266 - CFI_STARTPROC
18267 - GET_THREAD_INFO(%rax)
18268 - movq %rdi,%rcx
18269 - addq %rdx,%rcx
18270 - jc bad_to_user
18271 - cmpq TI_addr_limit(%rax),%rcx
18272 - jae bad_to_user
18273 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18274 - CFI_ENDPROC
18275 -ENDPROC(copy_to_user)
18276 -
18277 -/* Standard copy_from_user with segment limit checking */
18278 -ENTRY(copy_from_user)
18279 - CFI_STARTPROC
18280 - GET_THREAD_INFO(%rax)
18281 - movq %rsi,%rcx
18282 - addq %rdx,%rcx
18283 - jc bad_from_user
18284 - cmpq TI_addr_limit(%rax),%rcx
18285 - jae bad_from_user
18286 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18287 - CFI_ENDPROC
18288 -ENDPROC(copy_from_user)
18289 -
18290 ENTRY(copy_user_generic)
18291 CFI_STARTPROC
18292 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18293 CFI_ENDPROC
18294 ENDPROC(copy_user_generic)
18295
18296 -ENTRY(__copy_from_user_inatomic)
18297 - CFI_STARTPROC
18298 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18299 - CFI_ENDPROC
18300 -ENDPROC(__copy_from_user_inatomic)
18301 -
18302 .section .fixup,"ax"
18303 /* must zero dest */
18304 ENTRY(bad_from_user)
18305 bad_from_user:
18306 CFI_STARTPROC
18307 + testl %edx,%edx
18308 + js bad_to_user
18309 movl %edx,%ecx
18310 xorl %eax,%eax
18311 rep
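
Besides dropping the copy_to_user/copy_from_user/__copy_from_user_inatomic entry points (their EXPORT_SYMBOLs disappear in x8664_ksyms_64.c above), the hunk adds a sign test to bad_from_user: a length that has gone negative branches to bad_to_user instead of zero-filling an enormous range. Roughly, in C (sketch only):

#include <string.h>

static unsigned long zero_tail_sketch(void *dst, unsigned long len)
{
        if ((long)len < 0)              /* bogus (overflowed) size: bail     */
                return len;             /* ...as the jump to bad_to_user does */
        memset(dst, 0, len);            /* otherwise zero what wasn't copied */
        return len;
}
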
18312 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S
18313 --- linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18314 +++ linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18315 @@ -14,6 +14,7 @@
18316 #include <asm/current.h>
18317 #include <asm/asm-offsets.h>
18318 #include <asm/thread_info.h>
18319 +#include <asm/pgtable.h>
18320
18321 .macro ALIGN_DESTINATION
18322 #ifdef FIX_ALIGNMENT
18323 @@ -50,6 +51,15 @@
18324 */
18325 ENTRY(__copy_user_nocache)
18326 CFI_STARTPROC
18327 +
18328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18329 + mov $PAX_USER_SHADOW_BASE,%rcx
18330 + cmp %rcx,%rsi
18331 + jae 1f
18332 + add %rcx,%rsi
18333 +1:
18334 +#endif
18335 +
18336 cmpl $8,%edx
18337 jb 20f /* less then 8 bytes, go to byte copy loop */
18338 ALIGN_DESTINATION
18339 diff -urNp linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c
18340 --- linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18341 +++ linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18342 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18343 len -= 2;
18344 }
18345 }
18346 +
18347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18348 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18349 + src += PAX_USER_SHADOW_BASE;
18350 +#endif
18351 +
18352 isum = csum_partial_copy_generic((__force const void *)src,
18353 dst, len, isum, errp, NULL);
18354 if (unlikely(*errp))
18355 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18356 }
18357
18358 *errp = 0;
18359 +
18360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18361 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18362 + dst += PAX_USER_SHADOW_BASE;
18363 +#endif
18364 +
18365 return csum_partial_copy_generic(src, (void __force *)dst,
18366 len, isum, NULL, errp);
18367 }
18368 diff -urNp linux-2.6.32.41/arch/x86/lib/getuser.S linux-2.6.32.41/arch/x86/lib/getuser.S
18369 --- linux-2.6.32.41/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18370 +++ linux-2.6.32.41/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18371 @@ -33,14 +33,35 @@
18372 #include <asm/asm-offsets.h>
18373 #include <asm/thread_info.h>
18374 #include <asm/asm.h>
18375 +#include <asm/segment.h>
18376 +#include <asm/pgtable.h>
18377 +
18378 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18379 +#define __copyuser_seg gs;
18380 +#else
18381 +#define __copyuser_seg
18382 +#endif
18383
18384 .text
18385 ENTRY(__get_user_1)
18386 CFI_STARTPROC
18387 +
18388 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18389 GET_THREAD_INFO(%_ASM_DX)
18390 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18391 jae bad_get_user
18392 -1: movzb (%_ASM_AX),%edx
18393 +
18394 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18395 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18396 + cmp %_ASM_DX,%_ASM_AX
18397 + jae 1234f
18398 + add %_ASM_DX,%_ASM_AX
18399 +1234:
18400 +#endif
18401 +
18402 +#endif
18403 +
18404 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18405 xor %eax,%eax
18406 ret
18407 CFI_ENDPROC
18408 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18409 ENTRY(__get_user_2)
18410 CFI_STARTPROC
18411 add $1,%_ASM_AX
18412 +
18413 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18414 jc bad_get_user
18415 GET_THREAD_INFO(%_ASM_DX)
18416 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18417 jae bad_get_user
18418 -2: movzwl -1(%_ASM_AX),%edx
18419 +
18420 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18421 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18422 + cmp %_ASM_DX,%_ASM_AX
18423 + jae 1234f
18424 + add %_ASM_DX,%_ASM_AX
18425 +1234:
18426 +#endif
18427 +
18428 +#endif
18429 +
18430 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18431 xor %eax,%eax
18432 ret
18433 CFI_ENDPROC
18434 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18435 ENTRY(__get_user_4)
18436 CFI_STARTPROC
18437 add $3,%_ASM_AX
18438 +
18439 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18440 jc bad_get_user
18441 GET_THREAD_INFO(%_ASM_DX)
18442 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18443 jae bad_get_user
18444 -3: mov -3(%_ASM_AX),%edx
18445 +
18446 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18447 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18448 + cmp %_ASM_DX,%_ASM_AX
18449 + jae 1234f
18450 + add %_ASM_DX,%_ASM_AX
18451 +1234:
18452 +#endif
18453 +
18454 +#endif
18455 +
18456 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18457 xor %eax,%eax
18458 ret
18459 CFI_ENDPROC
18460 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18461 GET_THREAD_INFO(%_ASM_DX)
18462 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18463 jae bad_get_user
18464 +
18465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18466 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18467 + cmp %_ASM_DX,%_ASM_AX
18468 + jae 1234f
18469 + add %_ASM_DX,%_ASM_AX
18470 +1234:
18471 +#endif
18472 +
18473 4: movq -7(%_ASM_AX),%_ASM_DX
18474 xor %eax,%eax
18475 ret
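
The getuser.S changes combine the two UDEREF mechanisms: on i386 the access itself gets a %gs segment override (__copyuser_seg), while on amd64 a user address below PAX_USER_SHADOW_BASE is shifted up into the shadowed user mapping before being dereferenced, mirroring the C-level adjustment visible in csum-wrappers_64.c above. The amd64 adjustment written out in C, as a sketch (the constant is a placeholder, not the real PAX_USER_SHADOW_BASE):

#define USER_SHADOW_BASE_SKETCH 0x0000100000000000UL    /* placeholder value */

static inline unsigned long uderef_adjust(unsigned long uaddr)
{
        if (uaddr < USER_SHADOW_BASE_SKETCH)
                uaddr += USER_SHADOW_BASE_SKETCH;  /* redirect into the shadow area */
        return uaddr;                              /* already shadowed: leave as is */
}
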
18476 diff -urNp linux-2.6.32.41/arch/x86/lib/memcpy_64.S linux-2.6.32.41/arch/x86/lib/memcpy_64.S
18477 --- linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18478 +++ linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18479 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18480 * It is also a lot simpler. Use this when possible:
18481 */
18482
18483 - .section .altinstr_replacement, "ax"
18484 + .section .altinstr_replacement, "a"
18485 1: .byte 0xeb /* jmp <disp8> */
18486 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18487 2:
18488 diff -urNp linux-2.6.32.41/arch/x86/lib/memset_64.S linux-2.6.32.41/arch/x86/lib/memset_64.S
18489 --- linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18490 +++ linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18491 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18492
18493 #include <asm/cpufeature.h>
18494
18495 - .section .altinstr_replacement,"ax"
18496 + .section .altinstr_replacement,"a"
18497 1: .byte 0xeb /* jmp <disp8> */
18498 .byte (memset_c - memset) - (2f - 1b) /* offset */
18499 2:
18500 diff -urNp linux-2.6.32.41/arch/x86/lib/mmx_32.c linux-2.6.32.41/arch/x86/lib/mmx_32.c
18501 --- linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18502 +++ linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18503 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18504 {
18505 void *p;
18506 int i;
18507 + unsigned long cr0;
18508
18509 if (unlikely(in_interrupt()))
18510 return __memcpy(to, from, len);
18511 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18512 kernel_fpu_begin();
18513
18514 __asm__ __volatile__ (
18515 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18516 - " prefetch 64(%0)\n"
18517 - " prefetch 128(%0)\n"
18518 - " prefetch 192(%0)\n"
18519 - " prefetch 256(%0)\n"
18520 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18521 + " prefetch 64(%1)\n"
18522 + " prefetch 128(%1)\n"
18523 + " prefetch 192(%1)\n"
18524 + " prefetch 256(%1)\n"
18525 "2: \n"
18526 ".section .fixup, \"ax\"\n"
18527 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18528 + "3: \n"
18529 +
18530 +#ifdef CONFIG_PAX_KERNEXEC
18531 + " movl %%cr0, %0\n"
18532 + " movl %0, %%eax\n"
18533 + " andl $0xFFFEFFFF, %%eax\n"
18534 + " movl %%eax, %%cr0\n"
18535 +#endif
18536 +
18537 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18538 +
18539 +#ifdef CONFIG_PAX_KERNEXEC
18540 + " movl %0, %%cr0\n"
18541 +#endif
18542 +
18543 " jmp 2b\n"
18544 ".previous\n"
18545 _ASM_EXTABLE(1b, 3b)
18546 - : : "r" (from));
18547 + : "=&r" (cr0) : "r" (from) : "ax");
18548
18549 for ( ; i > 5; i--) {
18550 __asm__ __volatile__ (
18551 - "1: prefetch 320(%0)\n"
18552 - "2: movq (%0), %%mm0\n"
18553 - " movq 8(%0), %%mm1\n"
18554 - " movq 16(%0), %%mm2\n"
18555 - " movq 24(%0), %%mm3\n"
18556 - " movq %%mm0, (%1)\n"
18557 - " movq %%mm1, 8(%1)\n"
18558 - " movq %%mm2, 16(%1)\n"
18559 - " movq %%mm3, 24(%1)\n"
18560 - " movq 32(%0), %%mm0\n"
18561 - " movq 40(%0), %%mm1\n"
18562 - " movq 48(%0), %%mm2\n"
18563 - " movq 56(%0), %%mm3\n"
18564 - " movq %%mm0, 32(%1)\n"
18565 - " movq %%mm1, 40(%1)\n"
18566 - " movq %%mm2, 48(%1)\n"
18567 - " movq %%mm3, 56(%1)\n"
18568 + "1: prefetch 320(%1)\n"
18569 + "2: movq (%1), %%mm0\n"
18570 + " movq 8(%1), %%mm1\n"
18571 + " movq 16(%1), %%mm2\n"
18572 + " movq 24(%1), %%mm3\n"
18573 + " movq %%mm0, (%2)\n"
18574 + " movq %%mm1, 8(%2)\n"
18575 + " movq %%mm2, 16(%2)\n"
18576 + " movq %%mm3, 24(%2)\n"
18577 + " movq 32(%1), %%mm0\n"
18578 + " movq 40(%1), %%mm1\n"
18579 + " movq 48(%1), %%mm2\n"
18580 + " movq 56(%1), %%mm3\n"
18581 + " movq %%mm0, 32(%2)\n"
18582 + " movq %%mm1, 40(%2)\n"
18583 + " movq %%mm2, 48(%2)\n"
18584 + " movq %%mm3, 56(%2)\n"
18585 ".section .fixup, \"ax\"\n"
18586 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18587 + "3:\n"
18588 +
18589 +#ifdef CONFIG_PAX_KERNEXEC
18590 + " movl %%cr0, %0\n"
18591 + " movl %0, %%eax\n"
18592 + " andl $0xFFFEFFFF, %%eax\n"
18593 + " movl %%eax, %%cr0\n"
18594 +#endif
18595 +
18596 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18597 +
18598 +#ifdef CONFIG_PAX_KERNEXEC
18599 + " movl %0, %%cr0\n"
18600 +#endif
18601 +
18602 " jmp 2b\n"
18603 ".previous\n"
18604 _ASM_EXTABLE(1b, 3b)
18605 - : : "r" (from), "r" (to) : "memory");
18606 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18607
18608 from += 64;
18609 to += 64;
18610 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18611 static void fast_copy_page(void *to, void *from)
18612 {
18613 int i;
18614 + unsigned long cr0;
18615
18616 kernel_fpu_begin();
18617
18618 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18619 * but that is for later. -AV
18620 */
18621 __asm__ __volatile__(
18622 - "1: prefetch (%0)\n"
18623 - " prefetch 64(%0)\n"
18624 - " prefetch 128(%0)\n"
18625 - " prefetch 192(%0)\n"
18626 - " prefetch 256(%0)\n"
18627 + "1: prefetch (%1)\n"
18628 + " prefetch 64(%1)\n"
18629 + " prefetch 128(%1)\n"
18630 + " prefetch 192(%1)\n"
18631 + " prefetch 256(%1)\n"
18632 "2: \n"
18633 ".section .fixup, \"ax\"\n"
18634 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18635 + "3: \n"
18636 +
18637 +#ifdef CONFIG_PAX_KERNEXEC
18638 + " movl %%cr0, %0\n"
18639 + " movl %0, %%eax\n"
18640 + " andl $0xFFFEFFFF, %%eax\n"
18641 + " movl %%eax, %%cr0\n"
18642 +#endif
18643 +
18644 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18645 +
18646 +#ifdef CONFIG_PAX_KERNEXEC
18647 + " movl %0, %%cr0\n"
18648 +#endif
18649 +
18650 " jmp 2b\n"
18651 ".previous\n"
18652 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18653 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18654
18655 for (i = 0; i < (4096-320)/64; i++) {
18656 __asm__ __volatile__ (
18657 - "1: prefetch 320(%0)\n"
18658 - "2: movq (%0), %%mm0\n"
18659 - " movntq %%mm0, (%1)\n"
18660 - " movq 8(%0), %%mm1\n"
18661 - " movntq %%mm1, 8(%1)\n"
18662 - " movq 16(%0), %%mm2\n"
18663 - " movntq %%mm2, 16(%1)\n"
18664 - " movq 24(%0), %%mm3\n"
18665 - " movntq %%mm3, 24(%1)\n"
18666 - " movq 32(%0), %%mm4\n"
18667 - " movntq %%mm4, 32(%1)\n"
18668 - " movq 40(%0), %%mm5\n"
18669 - " movntq %%mm5, 40(%1)\n"
18670 - " movq 48(%0), %%mm6\n"
18671 - " movntq %%mm6, 48(%1)\n"
18672 - " movq 56(%0), %%mm7\n"
18673 - " movntq %%mm7, 56(%1)\n"
18674 + "1: prefetch 320(%1)\n"
18675 + "2: movq (%1), %%mm0\n"
18676 + " movntq %%mm0, (%2)\n"
18677 + " movq 8(%1), %%mm1\n"
18678 + " movntq %%mm1, 8(%2)\n"
18679 + " movq 16(%1), %%mm2\n"
18680 + " movntq %%mm2, 16(%2)\n"
18681 + " movq 24(%1), %%mm3\n"
18682 + " movntq %%mm3, 24(%2)\n"
18683 + " movq 32(%1), %%mm4\n"
18684 + " movntq %%mm4, 32(%2)\n"
18685 + " movq 40(%1), %%mm5\n"
18686 + " movntq %%mm5, 40(%2)\n"
18687 + " movq 48(%1), %%mm6\n"
18688 + " movntq %%mm6, 48(%2)\n"
18689 + " movq 56(%1), %%mm7\n"
18690 + " movntq %%mm7, 56(%2)\n"
18691 ".section .fixup, \"ax\"\n"
18692 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18693 + "3:\n"
18694 +
18695 +#ifdef CONFIG_PAX_KERNEXEC
18696 + " movl %%cr0, %0\n"
18697 + " movl %0, %%eax\n"
18698 + " andl $0xFFFEFFFF, %%eax\n"
18699 + " movl %%eax, %%cr0\n"
18700 +#endif
18701 +
18702 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18703 +
18704 +#ifdef CONFIG_PAX_KERNEXEC
18705 + " movl %0, %%cr0\n"
18706 +#endif
18707 +
18708 " jmp 2b\n"
18709 ".previous\n"
18710 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18711 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18712
18713 from += 64;
18714 to += 64;
18715 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18716 static void fast_copy_page(void *to, void *from)
18717 {
18718 int i;
18719 + unsigned long cr0;
18720
18721 kernel_fpu_begin();
18722
18723 __asm__ __volatile__ (
18724 - "1: prefetch (%0)\n"
18725 - " prefetch 64(%0)\n"
18726 - " prefetch 128(%0)\n"
18727 - " prefetch 192(%0)\n"
18728 - " prefetch 256(%0)\n"
18729 + "1: prefetch (%1)\n"
18730 + " prefetch 64(%1)\n"
18731 + " prefetch 128(%1)\n"
18732 + " prefetch 192(%1)\n"
18733 + " prefetch 256(%1)\n"
18734 "2: \n"
18735 ".section .fixup, \"ax\"\n"
18736 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18737 + "3: \n"
18738 +
18739 +#ifdef CONFIG_PAX_KERNEXEC
18740 + " movl %%cr0, %0\n"
18741 + " movl %0, %%eax\n"
18742 + " andl $0xFFFEFFFF, %%eax\n"
18743 + " movl %%eax, %%cr0\n"
18744 +#endif
18745 +
18746 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18747 +
18748 +#ifdef CONFIG_PAX_KERNEXEC
18749 + " movl %0, %%cr0\n"
18750 +#endif
18751 +
18752 " jmp 2b\n"
18753 ".previous\n"
18754 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18755 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18756
18757 for (i = 0; i < 4096/64; i++) {
18758 __asm__ __volatile__ (
18759 - "1: prefetch 320(%0)\n"
18760 - "2: movq (%0), %%mm0\n"
18761 - " movq 8(%0), %%mm1\n"
18762 - " movq 16(%0), %%mm2\n"
18763 - " movq 24(%0), %%mm3\n"
18764 - " movq %%mm0, (%1)\n"
18765 - " movq %%mm1, 8(%1)\n"
18766 - " movq %%mm2, 16(%1)\n"
18767 - " movq %%mm3, 24(%1)\n"
18768 - " movq 32(%0), %%mm0\n"
18769 - " movq 40(%0), %%mm1\n"
18770 - " movq 48(%0), %%mm2\n"
18771 - " movq 56(%0), %%mm3\n"
18772 - " movq %%mm0, 32(%1)\n"
18773 - " movq %%mm1, 40(%1)\n"
18774 - " movq %%mm2, 48(%1)\n"
18775 - " movq %%mm3, 56(%1)\n"
18776 + "1: prefetch 320(%1)\n"
18777 + "2: movq (%1), %%mm0\n"
18778 + " movq 8(%1), %%mm1\n"
18779 + " movq 16(%1), %%mm2\n"
18780 + " movq 24(%1), %%mm3\n"
18781 + " movq %%mm0, (%2)\n"
18782 + " movq %%mm1, 8(%2)\n"
18783 + " movq %%mm2, 16(%2)\n"
18784 + " movq %%mm3, 24(%2)\n"
18785 + " movq 32(%1), %%mm0\n"
18786 + " movq 40(%1), %%mm1\n"
18787 + " movq 48(%1), %%mm2\n"
18788 + " movq 56(%1), %%mm3\n"
18789 + " movq %%mm0, 32(%2)\n"
18790 + " movq %%mm1, 40(%2)\n"
18791 + " movq %%mm2, 48(%2)\n"
18792 + " movq %%mm3, 56(%2)\n"
18793 ".section .fixup, \"ax\"\n"
18794 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18795 + "3:\n"
18796 +
18797 +#ifdef CONFIG_PAX_KERNEXEC
18798 + " movl %%cr0, %0\n"
18799 + " movl %0, %%eax\n"
18800 + " andl $0xFFFEFFFF, %%eax\n"
18801 + " movl %%eax, %%cr0\n"
18802 +#endif
18803 +
18804 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18805 +
18806 +#ifdef CONFIG_PAX_KERNEXEC
18807 + " movl %0, %%cr0\n"
18808 +#endif
18809 +
18810 " jmp 2b\n"
18811 ".previous\n"
18812 _ASM_EXTABLE(1b, 3b)
18813 - : : "r" (from), "r" (to) : "memory");
18814 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18815
18816 from += 64;
18817 to += 64;
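
Every mmx_32.c fixup path patches an instruction in the kernel text at runtime (the movw writes a short jmp over the faulting prefetch at label 1), so with KERNEXEC the text is read-only and the patch brackets that store with a CR0 write-protect toggle; 0xFFFEFFFF clears bit 16, which is CR0.WP, and the operand renumbering from %0/%1 to %1/%2 simply makes room for the new cr0 output and the "ax" clobber. The same toggle as plain inline-assembly helpers (sketch; in-kernel code would normally go through read_cr0()/write_cr0()):

#define CR0_WP_BIT (1UL << 16)                  /* CR0.WP */

static inline unsigned long wp_disable(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~CR0_WP_BIT) : "memory");
        return cr0;                             /* caller restores this value */
}

static inline void wp_restore(unsigned long cr0)
{
        asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory");
}
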
18818 diff -urNp linux-2.6.32.41/arch/x86/lib/putuser.S linux-2.6.32.41/arch/x86/lib/putuser.S
18819 --- linux-2.6.32.41/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18820 +++ linux-2.6.32.41/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18821 @@ -15,7 +15,8 @@
18822 #include <asm/thread_info.h>
18823 #include <asm/errno.h>
18824 #include <asm/asm.h>
18825 -
18826 +#include <asm/segment.h>
18827 +#include <asm/pgtable.h>
18828
18829 /*
18830 * __put_user_X
18831 @@ -29,52 +30,119 @@
18832 * as they get called from within inline assembly.
18833 */
18834
18835 -#define ENTER CFI_STARTPROC ; \
18836 - GET_THREAD_INFO(%_ASM_BX)
18837 +#define ENTER CFI_STARTPROC
18838 #define EXIT ret ; \
18839 CFI_ENDPROC
18840
18841 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18842 +#define _DEST %_ASM_CX,%_ASM_BX
18843 +#else
18844 +#define _DEST %_ASM_CX
18845 +#endif
18846 +
18847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18848 +#define __copyuser_seg gs;
18849 +#else
18850 +#define __copyuser_seg
18851 +#endif
18852 +
18853 .text
18854 ENTRY(__put_user_1)
18855 ENTER
18856 +
18857 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18858 + GET_THREAD_INFO(%_ASM_BX)
18859 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18860 jae bad_put_user
18861 -1: movb %al,(%_ASM_CX)
18862 +
18863 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18864 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18865 + cmp %_ASM_BX,%_ASM_CX
18866 + jb 1234f
18867 + xor %ebx,%ebx
18868 +1234:
18869 +#endif
18870 +
18871 +#endif
18872 +
18873 +1: __copyuser_seg movb %al,(_DEST)
18874 xor %eax,%eax
18875 EXIT
18876 ENDPROC(__put_user_1)
18877
18878 ENTRY(__put_user_2)
18879 ENTER
18880 +
18881 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18882 + GET_THREAD_INFO(%_ASM_BX)
18883 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18884 sub $1,%_ASM_BX
18885 cmp %_ASM_BX,%_ASM_CX
18886 jae bad_put_user
18887 -2: movw %ax,(%_ASM_CX)
18888 +
18889 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18890 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18891 + cmp %_ASM_BX,%_ASM_CX
18892 + jb 1234f
18893 + xor %ebx,%ebx
18894 +1234:
18895 +#endif
18896 +
18897 +#endif
18898 +
18899 +2: __copyuser_seg movw %ax,(_DEST)
18900 xor %eax,%eax
18901 EXIT
18902 ENDPROC(__put_user_2)
18903
18904 ENTRY(__put_user_4)
18905 ENTER
18906 +
18907 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18908 + GET_THREAD_INFO(%_ASM_BX)
18909 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18910 sub $3,%_ASM_BX
18911 cmp %_ASM_BX,%_ASM_CX
18912 jae bad_put_user
18913 -3: movl %eax,(%_ASM_CX)
18914 +
18915 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18916 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18917 + cmp %_ASM_BX,%_ASM_CX
18918 + jb 1234f
18919 + xor %ebx,%ebx
18920 +1234:
18921 +#endif
18922 +
18923 +#endif
18924 +
18925 +3: __copyuser_seg movl %eax,(_DEST)
18926 xor %eax,%eax
18927 EXIT
18928 ENDPROC(__put_user_4)
18929
18930 ENTRY(__put_user_8)
18931 ENTER
18932 +
18933 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18934 + GET_THREAD_INFO(%_ASM_BX)
18935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18936 sub $7,%_ASM_BX
18937 cmp %_ASM_BX,%_ASM_CX
18938 jae bad_put_user
18939 -4: mov %_ASM_AX,(%_ASM_CX)
18940 +
18941 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18942 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18943 + cmp %_ASM_BX,%_ASM_CX
18944 + jb 1234f
18945 + xor %ebx,%ebx
18946 +1234:
18947 +#endif
18948 +
18949 +#endif
18950 +
18951 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
18952 #ifdef CONFIG_X86_32
18953 -5: movl %edx,4(%_ASM_CX)
18954 +5: __copyuser_seg movl %edx,4(_DEST)
18955 #endif
18956 xor %eax,%eax
18957 EXIT
18958 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_32.c linux-2.6.32.41/arch/x86/lib/usercopy_32.c
18959 --- linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
18960 +++ linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
18961 @@ -43,7 +43,7 @@ do { \
18962 __asm__ __volatile__( \
18963 " testl %1,%1\n" \
18964 " jz 2f\n" \
18965 - "0: lodsb\n" \
18966 + "0: "__copyuser_seg"lodsb\n" \
18967 " stosb\n" \
18968 " testb %%al,%%al\n" \
18969 " jz 1f\n" \
18970 @@ -128,10 +128,12 @@ do { \
18971 int __d0; \
18972 might_fault(); \
18973 __asm__ __volatile__( \
18974 + __COPYUSER_SET_ES \
18975 "0: rep; stosl\n" \
18976 " movl %2,%0\n" \
18977 "1: rep; stosb\n" \
18978 "2:\n" \
18979 + __COPYUSER_RESTORE_ES \
18980 ".section .fixup,\"ax\"\n" \
18981 "3: lea 0(%2,%0,4),%0\n" \
18982 " jmp 2b\n" \
18983 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18984 might_fault();
18985
18986 __asm__ __volatile__(
18987 + __COPYUSER_SET_ES
18988 " testl %0, %0\n"
18989 " jz 3f\n"
18990 " andl %0,%%ecx\n"
18991 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18992 " subl %%ecx,%0\n"
18993 " addl %0,%%eax\n"
18994 "1:\n"
18995 + __COPYUSER_RESTORE_ES
18996 ".section .fixup,\"ax\"\n"
18997 "2: xorl %%eax,%%eax\n"
18998 " jmp 1b\n"
18999 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19000
19001 #ifdef CONFIG_X86_INTEL_USERCOPY
19002 static unsigned long
19003 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19004 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19005 {
19006 int d0, d1;
19007 __asm__ __volatile__(
19008 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19009 " .align 2,0x90\n"
19010 "3: movl 0(%4), %%eax\n"
19011 "4: movl 4(%4), %%edx\n"
19012 - "5: movl %%eax, 0(%3)\n"
19013 - "6: movl %%edx, 4(%3)\n"
19014 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19015 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19016 "7: movl 8(%4), %%eax\n"
19017 "8: movl 12(%4),%%edx\n"
19018 - "9: movl %%eax, 8(%3)\n"
19019 - "10: movl %%edx, 12(%3)\n"
19020 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19021 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19022 "11: movl 16(%4), %%eax\n"
19023 "12: movl 20(%4), %%edx\n"
19024 - "13: movl %%eax, 16(%3)\n"
19025 - "14: movl %%edx, 20(%3)\n"
19026 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19027 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19028 "15: movl 24(%4), %%eax\n"
19029 "16: movl 28(%4), %%edx\n"
19030 - "17: movl %%eax, 24(%3)\n"
19031 - "18: movl %%edx, 28(%3)\n"
19032 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19033 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19034 "19: movl 32(%4), %%eax\n"
19035 "20: movl 36(%4), %%edx\n"
19036 - "21: movl %%eax, 32(%3)\n"
19037 - "22: movl %%edx, 36(%3)\n"
19038 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19039 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19040 "23: movl 40(%4), %%eax\n"
19041 "24: movl 44(%4), %%edx\n"
19042 - "25: movl %%eax, 40(%3)\n"
19043 - "26: movl %%edx, 44(%3)\n"
19044 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19045 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19046 "27: movl 48(%4), %%eax\n"
19047 "28: movl 52(%4), %%edx\n"
19048 - "29: movl %%eax, 48(%3)\n"
19049 - "30: movl %%edx, 52(%3)\n"
19050 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19051 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19052 "31: movl 56(%4), %%eax\n"
19053 "32: movl 60(%4), %%edx\n"
19054 - "33: movl %%eax, 56(%3)\n"
19055 - "34: movl %%edx, 60(%3)\n"
19056 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19057 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19058 " addl $-64, %0\n"
19059 " addl $64, %4\n"
19060 " addl $64, %3\n"
19061 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19062 " shrl $2, %0\n"
19063 " andl $3, %%eax\n"
19064 " cld\n"
19065 + __COPYUSER_SET_ES
19066 "99: rep; movsl\n"
19067 "36: movl %%eax, %0\n"
19068 "37: rep; movsb\n"
19069 "100:\n"
19070 + __COPYUSER_RESTORE_ES
19071 + ".section .fixup,\"ax\"\n"
19072 + "101: lea 0(%%eax,%0,4),%0\n"
19073 + " jmp 100b\n"
19074 + ".previous\n"
19075 + ".section __ex_table,\"a\"\n"
19076 + " .align 4\n"
19077 + " .long 1b,100b\n"
19078 + " .long 2b,100b\n"
19079 + " .long 3b,100b\n"
19080 + " .long 4b,100b\n"
19081 + " .long 5b,100b\n"
19082 + " .long 6b,100b\n"
19083 + " .long 7b,100b\n"
19084 + " .long 8b,100b\n"
19085 + " .long 9b,100b\n"
19086 + " .long 10b,100b\n"
19087 + " .long 11b,100b\n"
19088 + " .long 12b,100b\n"
19089 + " .long 13b,100b\n"
19090 + " .long 14b,100b\n"
19091 + " .long 15b,100b\n"
19092 + " .long 16b,100b\n"
19093 + " .long 17b,100b\n"
19094 + " .long 18b,100b\n"
19095 + " .long 19b,100b\n"
19096 + " .long 20b,100b\n"
19097 + " .long 21b,100b\n"
19098 + " .long 22b,100b\n"
19099 + " .long 23b,100b\n"
19100 + " .long 24b,100b\n"
19101 + " .long 25b,100b\n"
19102 + " .long 26b,100b\n"
19103 + " .long 27b,100b\n"
19104 + " .long 28b,100b\n"
19105 + " .long 29b,100b\n"
19106 + " .long 30b,100b\n"
19107 + " .long 31b,100b\n"
19108 + " .long 32b,100b\n"
19109 + " .long 33b,100b\n"
19110 + " .long 34b,100b\n"
19111 + " .long 35b,100b\n"
19112 + " .long 36b,100b\n"
19113 + " .long 37b,100b\n"
19114 + " .long 99b,101b\n"
19115 + ".previous"
19116 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19117 + : "1"(to), "2"(from), "0"(size)
19118 + : "eax", "edx", "memory");
19119 + return size;
19120 +}
19121 +
19122 +static unsigned long
19123 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19124 +{
19125 + int d0, d1;
19126 + __asm__ __volatile__(
19127 + " .align 2,0x90\n"
19128 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19129 + " cmpl $67, %0\n"
19130 + " jbe 3f\n"
19131 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19132 + " .align 2,0x90\n"
19133 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19134 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19135 + "5: movl %%eax, 0(%3)\n"
19136 + "6: movl %%edx, 4(%3)\n"
19137 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19138 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19139 + "9: movl %%eax, 8(%3)\n"
19140 + "10: movl %%edx, 12(%3)\n"
19141 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19142 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19143 + "13: movl %%eax, 16(%3)\n"
19144 + "14: movl %%edx, 20(%3)\n"
19145 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19146 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19147 + "17: movl %%eax, 24(%3)\n"
19148 + "18: movl %%edx, 28(%3)\n"
19149 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19150 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19151 + "21: movl %%eax, 32(%3)\n"
19152 + "22: movl %%edx, 36(%3)\n"
19153 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19154 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19155 + "25: movl %%eax, 40(%3)\n"
19156 + "26: movl %%edx, 44(%3)\n"
19157 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19158 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19159 + "29: movl %%eax, 48(%3)\n"
19160 + "30: movl %%edx, 52(%3)\n"
19161 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19162 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19163 + "33: movl %%eax, 56(%3)\n"
19164 + "34: movl %%edx, 60(%3)\n"
19165 + " addl $-64, %0\n"
19166 + " addl $64, %4\n"
19167 + " addl $64, %3\n"
19168 + " cmpl $63, %0\n"
19169 + " ja 1b\n"
19170 + "35: movl %0, %%eax\n"
19171 + " shrl $2, %0\n"
19172 + " andl $3, %%eax\n"
19173 + " cld\n"
19174 + "99: rep; "__copyuser_seg" movsl\n"
19175 + "36: movl %%eax, %0\n"
19176 + "37: rep; "__copyuser_seg" movsb\n"
19177 + "100:\n"
19178 ".section .fixup,\"ax\"\n"
19179 "101: lea 0(%%eax,%0,4),%0\n"
19180 " jmp 100b\n"
19181 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19182 int d0, d1;
19183 __asm__ __volatile__(
19184 " .align 2,0x90\n"
19185 - "0: movl 32(%4), %%eax\n"
19186 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19187 " cmpl $67, %0\n"
19188 " jbe 2f\n"
19189 - "1: movl 64(%4), %%eax\n"
19190 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19191 " .align 2,0x90\n"
19192 - "2: movl 0(%4), %%eax\n"
19193 - "21: movl 4(%4), %%edx\n"
19194 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19195 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19196 " movl %%eax, 0(%3)\n"
19197 " movl %%edx, 4(%3)\n"
19198 - "3: movl 8(%4), %%eax\n"
19199 - "31: movl 12(%4),%%edx\n"
19200 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19201 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19202 " movl %%eax, 8(%3)\n"
19203 " movl %%edx, 12(%3)\n"
19204 - "4: movl 16(%4), %%eax\n"
19205 - "41: movl 20(%4), %%edx\n"
19206 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19207 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19208 " movl %%eax, 16(%3)\n"
19209 " movl %%edx, 20(%3)\n"
19210 - "10: movl 24(%4), %%eax\n"
19211 - "51: movl 28(%4), %%edx\n"
19212 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19213 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19214 " movl %%eax, 24(%3)\n"
19215 " movl %%edx, 28(%3)\n"
19216 - "11: movl 32(%4), %%eax\n"
19217 - "61: movl 36(%4), %%edx\n"
19218 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19219 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19220 " movl %%eax, 32(%3)\n"
19221 " movl %%edx, 36(%3)\n"
19222 - "12: movl 40(%4), %%eax\n"
19223 - "71: movl 44(%4), %%edx\n"
19224 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19225 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19226 " movl %%eax, 40(%3)\n"
19227 " movl %%edx, 44(%3)\n"
19228 - "13: movl 48(%4), %%eax\n"
19229 - "81: movl 52(%4), %%edx\n"
19230 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19231 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19232 " movl %%eax, 48(%3)\n"
19233 " movl %%edx, 52(%3)\n"
19234 - "14: movl 56(%4), %%eax\n"
19235 - "91: movl 60(%4), %%edx\n"
19236 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19237 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19238 " movl %%eax, 56(%3)\n"
19239 " movl %%edx, 60(%3)\n"
19240 " addl $-64, %0\n"
19241 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19242 " shrl $2, %0\n"
19243 " andl $3, %%eax\n"
19244 " cld\n"
19245 - "6: rep; movsl\n"
19246 + "6: rep; "__copyuser_seg" movsl\n"
19247 " movl %%eax,%0\n"
19248 - "7: rep; movsb\n"
19249 + "7: rep; "__copyuser_seg" movsb\n"
19250 "8:\n"
19251 ".section .fixup,\"ax\"\n"
19252 "9: lea 0(%%eax,%0,4),%0\n"
19253 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19254
19255 __asm__ __volatile__(
19256 " .align 2,0x90\n"
19257 - "0: movl 32(%4), %%eax\n"
19258 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19259 " cmpl $67, %0\n"
19260 " jbe 2f\n"
19261 - "1: movl 64(%4), %%eax\n"
19262 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19263 " .align 2,0x90\n"
19264 - "2: movl 0(%4), %%eax\n"
19265 - "21: movl 4(%4), %%edx\n"
19266 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19267 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19268 " movnti %%eax, 0(%3)\n"
19269 " movnti %%edx, 4(%3)\n"
19270 - "3: movl 8(%4), %%eax\n"
19271 - "31: movl 12(%4),%%edx\n"
19272 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19273 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19274 " movnti %%eax, 8(%3)\n"
19275 " movnti %%edx, 12(%3)\n"
19276 - "4: movl 16(%4), %%eax\n"
19277 - "41: movl 20(%4), %%edx\n"
19278 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19279 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19280 " movnti %%eax, 16(%3)\n"
19281 " movnti %%edx, 20(%3)\n"
19282 - "10: movl 24(%4), %%eax\n"
19283 - "51: movl 28(%4), %%edx\n"
19284 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19285 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19286 " movnti %%eax, 24(%3)\n"
19287 " movnti %%edx, 28(%3)\n"
19288 - "11: movl 32(%4), %%eax\n"
19289 - "61: movl 36(%4), %%edx\n"
19290 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19291 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19292 " movnti %%eax, 32(%3)\n"
19293 " movnti %%edx, 36(%3)\n"
19294 - "12: movl 40(%4), %%eax\n"
19295 - "71: movl 44(%4), %%edx\n"
19296 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19297 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19298 " movnti %%eax, 40(%3)\n"
19299 " movnti %%edx, 44(%3)\n"
19300 - "13: movl 48(%4), %%eax\n"
19301 - "81: movl 52(%4), %%edx\n"
19302 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19303 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19304 " movnti %%eax, 48(%3)\n"
19305 " movnti %%edx, 52(%3)\n"
19306 - "14: movl 56(%4), %%eax\n"
19307 - "91: movl 60(%4), %%edx\n"
19308 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19309 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19310 " movnti %%eax, 56(%3)\n"
19311 " movnti %%edx, 60(%3)\n"
19312 " addl $-64, %0\n"
19313 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19314 " shrl $2, %0\n"
19315 " andl $3, %%eax\n"
19316 " cld\n"
19317 - "6: rep; movsl\n"
19318 + "6: rep; "__copyuser_seg" movsl\n"
19319 " movl %%eax,%0\n"
19320 - "7: rep; movsb\n"
19321 + "7: rep; "__copyuser_seg" movsb\n"
19322 "8:\n"
19323 ".section .fixup,\"ax\"\n"
19324 "9: lea 0(%%eax,%0,4),%0\n"
19325 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19326
19327 __asm__ __volatile__(
19328 " .align 2,0x90\n"
19329 - "0: movl 32(%4), %%eax\n"
19330 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19331 " cmpl $67, %0\n"
19332 " jbe 2f\n"
19333 - "1: movl 64(%4), %%eax\n"
19334 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19335 " .align 2,0x90\n"
19336 - "2: movl 0(%4), %%eax\n"
19337 - "21: movl 4(%4), %%edx\n"
19338 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19339 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19340 " movnti %%eax, 0(%3)\n"
19341 " movnti %%edx, 4(%3)\n"
19342 - "3: movl 8(%4), %%eax\n"
19343 - "31: movl 12(%4),%%edx\n"
19344 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19345 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19346 " movnti %%eax, 8(%3)\n"
19347 " movnti %%edx, 12(%3)\n"
19348 - "4: movl 16(%4), %%eax\n"
19349 - "41: movl 20(%4), %%edx\n"
19350 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19351 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19352 " movnti %%eax, 16(%3)\n"
19353 " movnti %%edx, 20(%3)\n"
19354 - "10: movl 24(%4), %%eax\n"
19355 - "51: movl 28(%4), %%edx\n"
19356 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19357 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19358 " movnti %%eax, 24(%3)\n"
19359 " movnti %%edx, 28(%3)\n"
19360 - "11: movl 32(%4), %%eax\n"
19361 - "61: movl 36(%4), %%edx\n"
19362 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19363 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19364 " movnti %%eax, 32(%3)\n"
19365 " movnti %%edx, 36(%3)\n"
19366 - "12: movl 40(%4), %%eax\n"
19367 - "71: movl 44(%4), %%edx\n"
19368 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19369 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19370 " movnti %%eax, 40(%3)\n"
19371 " movnti %%edx, 44(%3)\n"
19372 - "13: movl 48(%4), %%eax\n"
19373 - "81: movl 52(%4), %%edx\n"
19374 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19375 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19376 " movnti %%eax, 48(%3)\n"
19377 " movnti %%edx, 52(%3)\n"
19378 - "14: movl 56(%4), %%eax\n"
19379 - "91: movl 60(%4), %%edx\n"
19380 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19381 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19382 " movnti %%eax, 56(%3)\n"
19383 " movnti %%edx, 60(%3)\n"
19384 " addl $-64, %0\n"
19385 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19386 " shrl $2, %0\n"
19387 " andl $3, %%eax\n"
19388 " cld\n"
19389 - "6: rep; movsl\n"
19390 + "6: rep; "__copyuser_seg" movsl\n"
19391 " movl %%eax,%0\n"
19392 - "7: rep; movsb\n"
19393 + "7: rep; "__copyuser_seg" movsb\n"
19394 "8:\n"
19395 ".section .fixup,\"ax\"\n"
19396 "9: lea 0(%%eax,%0,4),%0\n"
19397 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19398 */
19399 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19400 unsigned long size);
19401 -unsigned long __copy_user_intel(void __user *to, const void *from,
19402 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19403 + unsigned long size);
19404 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19405 unsigned long size);
19406 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19407 const void __user *from, unsigned long size);
19408 #endif /* CONFIG_X86_INTEL_USERCOPY */
19409
19410 /* Generic arbitrary sized copy. */
19411 -#define __copy_user(to, from, size) \
19412 +#define __copy_user(to, from, size, prefix, set, restore) \
19413 do { \
19414 int __d0, __d1, __d2; \
19415 __asm__ __volatile__( \
19416 + set \
19417 " cmp $7,%0\n" \
19418 " jbe 1f\n" \
19419 " movl %1,%0\n" \
19420 " negl %0\n" \
19421 " andl $7,%0\n" \
19422 " subl %0,%3\n" \
19423 - "4: rep; movsb\n" \
19424 + "4: rep; "prefix"movsb\n" \
19425 " movl %3,%0\n" \
19426 " shrl $2,%0\n" \
19427 " andl $3,%3\n" \
19428 " .align 2,0x90\n" \
19429 - "0: rep; movsl\n" \
19430 + "0: rep; "prefix"movsl\n" \
19431 " movl %3,%0\n" \
19432 - "1: rep; movsb\n" \
19433 + "1: rep; "prefix"movsb\n" \
19434 "2:\n" \
19435 + restore \
19436 ".section .fixup,\"ax\"\n" \
19437 "5: addl %3,%0\n" \
19438 " jmp 2b\n" \
19439 @@ -682,14 +799,14 @@ do { \
19440 " negl %0\n" \
19441 " andl $7,%0\n" \
19442 " subl %0,%3\n" \
19443 - "4: rep; movsb\n" \
19444 + "4: rep; "__copyuser_seg"movsb\n" \
19445 " movl %3,%0\n" \
19446 " shrl $2,%0\n" \
19447 " andl $3,%3\n" \
19448 " .align 2,0x90\n" \
19449 - "0: rep; movsl\n" \
19450 + "0: rep; "__copyuser_seg"movsl\n" \
19451 " movl %3,%0\n" \
19452 - "1: rep; movsb\n" \
19453 + "1: rep; "__copyuser_seg"movsb\n" \
19454 "2:\n" \
19455 ".section .fixup,\"ax\"\n" \
19456 "5: addl %3,%0\n" \
19457 @@ -775,9 +892,9 @@ survive:
19458 }
19459 #endif
19460 if (movsl_is_ok(to, from, n))
19461 - __copy_user(to, from, n);
19462 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19463 else
19464 - n = __copy_user_intel(to, from, n);
19465 + n = __generic_copy_to_user_intel(to, from, n);
19466 return n;
19467 }
19468 EXPORT_SYMBOL(__copy_to_user_ll);
19469 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19470 unsigned long n)
19471 {
19472 if (movsl_is_ok(to, from, n))
19473 - __copy_user(to, from, n);
19474 + __copy_user(to, from, n, __copyuser_seg, "", "");
19475 else
19476 - n = __copy_user_intel((void __user *)to,
19477 - (const void *)from, n);
19478 + n = __generic_copy_from_user_intel(to, from, n);
19479 return n;
19480 }
19481 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19482 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19483 if (n > 64 && cpu_has_xmm2)
19484 n = __copy_user_intel_nocache(to, from, n);
19485 else
19486 - __copy_user(to, from, n);
19487 + __copy_user(to, from, n, __copyuser_seg, "", "");
19488 #else
19489 - __copy_user(to, from, n);
19490 + __copy_user(to, from, n, __copyuser_seg, "", "");
19491 #endif
19492 return n;
19493 }
19494 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19495
19496 -/**
19497 - * copy_to_user: - Copy a block of data into user space.
19498 - * @to: Destination address, in user space.
19499 - * @from: Source address, in kernel space.
19500 - * @n: Number of bytes to copy.
19501 - *
19502 - * Context: User context only. This function may sleep.
19503 - *
19504 - * Copy data from kernel space to user space.
19505 - *
19506 - * Returns number of bytes that could not be copied.
19507 - * On success, this will be zero.
19508 - */
19509 -unsigned long
19510 -copy_to_user(void __user *to, const void *from, unsigned long n)
19511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19512 +void __set_fs(mm_segment_t x)
19513 {
19514 - if (access_ok(VERIFY_WRITE, to, n))
19515 - n = __copy_to_user(to, from, n);
19516 - return n;
19517 + switch (x.seg) {
19518 + case 0:
19519 + loadsegment(gs, 0);
19520 + break;
19521 + case TASK_SIZE_MAX:
19522 + loadsegment(gs, __USER_DS);
19523 + break;
19524 + case -1UL:
19525 + loadsegment(gs, __KERNEL_DS);
19526 + break;
19527 + default:
19528 + BUG();
19529 + }
19530 + return;
19531 }
19532 -EXPORT_SYMBOL(copy_to_user);
19533 +EXPORT_SYMBOL(__set_fs);
19534
19535 -/**
19536 - * copy_from_user: - Copy a block of data from user space.
19537 - * @to: Destination address, in kernel space.
19538 - * @from: Source address, in user space.
19539 - * @n: Number of bytes to copy.
19540 - *
19541 - * Context: User context only. This function may sleep.
19542 - *
19543 - * Copy data from user space to kernel space.
19544 - *
19545 - * Returns number of bytes that could not be copied.
19546 - * On success, this will be zero.
19547 - *
19548 - * If some data could not be copied, this function will pad the copied
19549 - * data to the requested size using zero bytes.
19550 - */
19551 -unsigned long
19552 -copy_from_user(void *to, const void __user *from, unsigned long n)
19553 +void set_fs(mm_segment_t x)
19554 {
19555 - if (access_ok(VERIFY_READ, from, n))
19556 - n = __copy_from_user(to, from, n);
19557 - else
19558 - memset(to, 0, n);
19559 - return n;
19560 + current_thread_info()->addr_limit = x;
19561 + __set_fs(x);
19562 }
19563 -EXPORT_SYMBOL(copy_from_user);
19564 +EXPORT_SYMBOL(set_fs);
19565 +#endif
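
The usercopy_32.c changes above route user accesses through a %gs segment override (__copyuser_seg), so the UDEREF variant of set_fs() has to keep %gs in step with the task's address limit. The switch in __set_fs() amounts to the mapping below; this is an illustrative sketch, the enum values are stand-ins for 0, __USER_DS and __KERNEL_DS, and the real code BUG()s on any other limit.

enum gs_sel { GS_NULL, GS_USER_DS, GS_KERNEL_DS, GS_INVALID };

static enum gs_sel gs_for_addr_limit(unsigned long seg, unsigned long task_size_max)
{
	if (seg == 0)
		return GS_NULL;       /* no user access permitted at all */
	if (seg == task_size_max)
		return GS_USER_DS;    /* USER_DS: ordinary user-space copies */
	if (seg == ~0UL)
		return GS_KERNEL_DS;  /* KERNEL_DS: flat access for set_fs(KERNEL_DS) callers */
	return GS_INVALID;
}
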
19566 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_64.c linux-2.6.32.41/arch/x86/lib/usercopy_64.c
19567 --- linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19568 +++ linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19569 @@ -42,6 +42,12 @@ long
19570 __strncpy_from_user(char *dst, const char __user *src, long count)
19571 {
19572 long res;
19573 +
19574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19575 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19576 + src += PAX_USER_SHADOW_BASE;
19577 +#endif
19578 +
19579 __do_strncpy_from_user(dst, src, count, res);
19580 return res;
19581 }
19582 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19583 {
19584 long __d0;
19585 might_fault();
19586 +
19587 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19588 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19589 + addr += PAX_USER_SHADOW_BASE;
19590 +#endif
19591 +
19592 /* no memory constraint because it doesn't change any memory gcc knows
19593 about */
19594 asm volatile(
19595 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19596
19597 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19598 {
19599 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19600 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19601 +
19602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19603 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19604 + to += PAX_USER_SHADOW_BASE;
19605 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19606 + from += PAX_USER_SHADOW_BASE;
19607 +#endif
19608 +
19609 return copy_user_generic((__force void *)to, (__force void *)from, len);
19610 - }
19611 - return len;
19612 + }
19613 + return len;
19614 }
19615 EXPORT_SYMBOL(copy_in_user);
19616
19617 diff -urNp linux-2.6.32.41/arch/x86/Makefile linux-2.6.32.41/arch/x86/Makefile
19618 --- linux-2.6.32.41/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19619 +++ linux-2.6.32.41/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19620 @@ -189,3 +189,12 @@ define archhelp
19621 echo ' FDARGS="..." arguments for the booted kernel'
19622 echo ' FDINITRD=file initrd for the booted kernel'
19623 endef
19624 +
19625 +define OLD_LD
19626 +
19627 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19628 +*** Please upgrade your binutils to 2.18 or newer
19629 +endef
19630 +
19631 +archprepare:
19632 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19633 diff -urNp linux-2.6.32.41/arch/x86/mm/extable.c linux-2.6.32.41/arch/x86/mm/extable.c
19634 --- linux-2.6.32.41/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19635 +++ linux-2.6.32.41/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19636 @@ -1,14 +1,71 @@
19637 #include <linux/module.h>
19638 #include <linux/spinlock.h>
19639 +#include <linux/sort.h>
19640 #include <asm/uaccess.h>
19641 +#include <asm/pgtable.h>
19642
19643 +/*
19644 + * The exception table needs to be sorted so that the binary
19645 + * search that we use to find entries in it works properly.
19646 + * This is used both for the kernel exception table and for
19647 + * the exception tables of modules that get loaded.
19648 + */
19649 +static int cmp_ex(const void *a, const void *b)
19650 +{
19651 + const struct exception_table_entry *x = a, *y = b;
19652 +
19653 + /* avoid overflow */
19654 + if (x->insn > y->insn)
19655 + return 1;
19656 + if (x->insn < y->insn)
19657 + return -1;
19658 + return 0;
19659 +}
19660 +
19661 +static void swap_ex(void *a, void *b, int size)
19662 +{
19663 + struct exception_table_entry t, *x = a, *y = b;
19664 +
19665 + t = *x;
19666 +
19667 + pax_open_kernel();
19668 + *x = *y;
19669 + *y = t;
19670 + pax_close_kernel();
19671 +}
19672 +
19673 +void sort_extable(struct exception_table_entry *start,
19674 + struct exception_table_entry *finish)
19675 +{
19676 + sort(start, finish - start, sizeof(struct exception_table_entry),
19677 + cmp_ex, swap_ex);
19678 +}
19679 +
19680 +#ifdef CONFIG_MODULES
19681 +/*
19682 + * If the exception table is sorted, any referring to the module init
19683 + * will be at the beginning or the end.
19684 + */
19685 +void trim_init_extable(struct module *m)
19686 +{
19687 + /*trim the beginning*/
19688 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19689 + m->extable++;
19690 + m->num_exentries--;
19691 + }
19692 + /*trim the end*/
19693 + while (m->num_exentries &&
19694 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19695 + m->num_exentries--;
19696 +}
19697 +#endif /* CONFIG_MODULES */
19698
19699 int fixup_exception(struct pt_regs *regs)
19700 {
19701 const struct exception_table_entry *fixup;
19702
19703 #ifdef CONFIG_PNPBIOS
19704 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19705 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19706 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19707 extern u32 pnp_bios_is_utter_crap;
19708 pnp_bios_is_utter_crap = 1;
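
The cmp_ex()/swap_ex() pair added above keeps the exception table ordered by faulting-instruction address so that the binary search used by fixup_exception() at fault time works; note that the comparator uses explicit <, > rather than subtraction so the int result cannot overflow on large address differences. A userspace analogue with qsort() and a simplified two-word entry layout, as an illustrative sketch:

#include <stdlib.h>

struct ex_entry { unsigned long insn, fixup; };

/* Compare by insn without subtracting, so large address differences
 * cannot overflow the comparator's int return value. */
static int cmp_ex_sketch(const void *a, const void *b)
{
	const struct ex_entry *x = a, *y = b;

	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

static void sort_table_sketch(struct ex_entry *tab, size_t n)
{
	qsort(tab, n, sizeof(*tab), cmp_ex_sketch);
}
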
19709 diff -urNp linux-2.6.32.41/arch/x86/mm/fault.c linux-2.6.32.41/arch/x86/mm/fault.c
19710 --- linux-2.6.32.41/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19711 +++ linux-2.6.32.41/arch/x86/mm/fault.c 2011-06-04 20:36:59.000000000 -0400
19712 @@ -11,10 +11,19 @@
19713 #include <linux/kprobes.h> /* __kprobes, ... */
19714 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19715 #include <linux/perf_event.h> /* perf_sw_event */
19716 +#include <linux/unistd.h>
19717 +#include <linux/compiler.h>
19718
19719 #include <asm/traps.h> /* dotraplinkage, ... */
19720 #include <asm/pgalloc.h> /* pgd_*(), ... */
19721 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19722 +#include <asm/vsyscall.h>
19723 +#include <asm/tlbflush.h>
19724 +
19725 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19726 +#include <asm/stacktrace.h>
19727 +#include "../kernel/dumpstack.h"
19728 +#endif
19729
19730 /*
19731 * Page fault error code bits:
19732 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19733 int ret = 0;
19734
19735 /* kprobe_running() needs smp_processor_id() */
19736 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19737 + if (kprobes_built_in() && !user_mode(regs)) {
19738 preempt_disable();
19739 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19740 ret = 1;
19741 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19742 return !instr_lo || (instr_lo>>1) == 1;
19743 case 0x00:
19744 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19745 - if (probe_kernel_address(instr, opcode))
19746 + if (user_mode(regs)) {
19747 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19748 + return 0;
19749 + } else if (probe_kernel_address(instr, opcode))
19750 return 0;
19751
19752 *prefetch = (instr_lo == 0xF) &&
19753 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19754 while (instr < max_instr) {
19755 unsigned char opcode;
19756
19757 - if (probe_kernel_address(instr, opcode))
19758 + if (user_mode(regs)) {
19759 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19760 + break;
19761 + } else if (probe_kernel_address(instr, opcode))
19762 break;
19763
19764 instr++;
19765 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19766 force_sig_info(si_signo, &info, tsk);
19767 }
19768
19769 +#ifdef CONFIG_PAX_EMUTRAMP
19770 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19771 +#endif
19772 +
19773 +#ifdef CONFIG_PAX_PAGEEXEC
19774 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19775 +{
19776 + pgd_t *pgd;
19777 + pud_t *pud;
19778 + pmd_t *pmd;
19779 +
19780 + pgd = pgd_offset(mm, address);
19781 + if (!pgd_present(*pgd))
19782 + return NULL;
19783 + pud = pud_offset(pgd, address);
19784 + if (!pud_present(*pud))
19785 + return NULL;
19786 + pmd = pmd_offset(pud, address);
19787 + if (!pmd_present(*pmd))
19788 + return NULL;
19789 + return pmd;
19790 +}
19791 +#endif
19792 +
19793 DEFINE_SPINLOCK(pgd_lock);
19794 LIST_HEAD(pgd_list);
19795
19796 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19797 address += PMD_SIZE) {
19798
19799 unsigned long flags;
19800 +
19801 +#ifdef CONFIG_PAX_PER_CPU_PGD
19802 + unsigned long cpu;
19803 +#else
19804 struct page *page;
19805 +#endif
19806
19807 spin_lock_irqsave(&pgd_lock, flags);
19808 +
19809 +#ifdef CONFIG_PAX_PER_CPU_PGD
19810 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19811 + pgd_t *pgd = get_cpu_pgd(cpu);
19812 +#else
19813 list_for_each_entry(page, &pgd_list, lru) {
19814 - if (!vmalloc_sync_one(page_address(page), address))
19815 + pgd_t *pgd = page_address(page);
19816 +#endif
19817 +
19818 + if (!vmalloc_sync_one(pgd, address))
19819 break;
19820 }
19821 spin_unlock_irqrestore(&pgd_lock, flags);
19822 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19823 * an interrupt in the middle of a task switch..
19824 */
19825 pgd_paddr = read_cr3();
19826 +
19827 +#ifdef CONFIG_PAX_PER_CPU_PGD
19828 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19829 +#endif
19830 +
19831 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19832 if (!pmd_k)
19833 return -1;
19834 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19835
19836 const pgd_t *pgd_ref = pgd_offset_k(address);
19837 unsigned long flags;
19838 +
19839 +#ifdef CONFIG_PAX_PER_CPU_PGD
19840 + unsigned long cpu;
19841 +#else
19842 struct page *page;
19843 +#endif
19844
19845 if (pgd_none(*pgd_ref))
19846 continue;
19847
19848 spin_lock_irqsave(&pgd_lock, flags);
19849 +
19850 +#ifdef CONFIG_PAX_PER_CPU_PGD
19851 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19852 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19853 +#else
19854 list_for_each_entry(page, &pgd_list, lru) {
19855 pgd_t *pgd;
19856 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19857 +#endif
19858 +
19859 if (pgd_none(*pgd))
19860 set_pgd(pgd, *pgd_ref);
19861 else
19862 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19863 * happen within a race in page table update. In the later
19864 * case just flush:
19865 */
19866 +
19867 +#ifdef CONFIG_PAX_PER_CPU_PGD
19868 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19869 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19870 +#else
19871 pgd = pgd_offset(current->active_mm, address);
19872 +#endif
19873 +
19874 pgd_ref = pgd_offset_k(address);
19875 if (pgd_none(*pgd_ref))
19876 return -1;
19877 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
19878 static int is_errata100(struct pt_regs *regs, unsigned long address)
19879 {
19880 #ifdef CONFIG_X86_64
19881 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19882 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19883 return 1;
19884 #endif
19885 return 0;
19886 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
19887 }
19888
19889 static const char nx_warning[] = KERN_CRIT
19890 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19891 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19892
19893 static void
19894 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19895 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
19896 if (!oops_may_print())
19897 return;
19898
19899 - if (error_code & PF_INSTR) {
19900 + if (nx_enabled && (error_code & PF_INSTR)) {
19901 unsigned int level;
19902
19903 pte_t *pte = lookup_address(address, &level);
19904
19905 if (pte && pte_present(*pte) && !pte_exec(*pte))
19906 - printk(nx_warning, current_uid());
19907 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19908 }
19909
19910 +#ifdef CONFIG_PAX_KERNEXEC
19911 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19912 + if (current->signal->curr_ip)
19913 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19914 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19915 + else
19916 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19917 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19918 + }
19919 +#endif
19920 +
19921 printk(KERN_ALERT "BUG: unable to handle kernel ");
19922 if (address < PAGE_SIZE)
19923 printk(KERN_CONT "NULL pointer dereference");
19924 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
19925 unsigned long address, int si_code)
19926 {
19927 struct task_struct *tsk = current;
19928 + struct mm_struct *mm = tsk->mm;
19929 +
19930 +#ifdef CONFIG_X86_64
19931 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19932 + if (regs->ip == (unsigned long)vgettimeofday) {
19933 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, gettimeofday);
19934 + return;
19935 + } else if (regs->ip == (unsigned long)vtime) {
19936 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, clock_gettime);
19937 + return;
19938 + } else if (regs->ip == (unsigned long)vgetcpu) {
19939 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
19940 + return;
19941 + }
19942 + }
19943 +#endif
19944 +
19945 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19946 + if (mm && (error_code & PF_USER)) {
19947 + unsigned long ip = regs->ip;
19948 +
19949 + if (v8086_mode(regs))
19950 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19951 +
19952 + /*
19953 + * It's possible to have interrupts off here:
19954 + */
19955 + local_irq_enable();
19956 +
19957 +#ifdef CONFIG_PAX_PAGEEXEC
19958 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19959 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19960 +
19961 +#ifdef CONFIG_PAX_EMUTRAMP
19962 + switch (pax_handle_fetch_fault(regs)) {
19963 + case 2:
19964 + return;
19965 + }
19966 +#endif
19967 +
19968 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19969 + do_group_exit(SIGKILL);
19970 + }
19971 +#endif
19972 +
19973 +#ifdef CONFIG_PAX_SEGMEXEC
19974 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19975 +
19976 +#ifdef CONFIG_PAX_EMUTRAMP
19977 + switch (pax_handle_fetch_fault(regs)) {
19978 + case 2:
19979 + return;
19980 + }
19981 +#endif
19982 +
19983 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19984 + do_group_exit(SIGKILL);
19985 + }
19986 +#endif
19987 +
19988 + }
19989 +#endif
19990
19991 /* User mode accesses just cause a SIGSEGV */
19992 if (error_code & PF_USER) {
19993 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
19994 return 1;
19995 }
19996
19997 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19998 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19999 +{
20000 + pte_t *pte;
20001 + pmd_t *pmd;
20002 + spinlock_t *ptl;
20003 + unsigned char pte_mask;
20004 +
20005 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20006 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20007 + return 0;
20008 +
20009 + /* PaX: it's our fault, let's handle it if we can */
20010 +
20011 + /* PaX: take a look at read faults before acquiring any locks */
20012 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20013 + /* instruction fetch attempt from a protected page in user mode */
20014 + up_read(&mm->mmap_sem);
20015 +
20016 +#ifdef CONFIG_PAX_EMUTRAMP
20017 + switch (pax_handle_fetch_fault(regs)) {
20018 + case 2:
20019 + return 1;
20020 + }
20021 +#endif
20022 +
20023 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20024 + do_group_exit(SIGKILL);
20025 + }
20026 +
20027 + pmd = pax_get_pmd(mm, address);
20028 + if (unlikely(!pmd))
20029 + return 0;
20030 +
20031 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20032 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20033 + pte_unmap_unlock(pte, ptl);
20034 + return 0;
20035 + }
20036 +
20037 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20038 + /* write attempt to a protected page in user mode */
20039 + pte_unmap_unlock(pte, ptl);
20040 + return 0;
20041 + }
20042 +
20043 +#ifdef CONFIG_SMP
20044 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20045 +#else
20046 + if (likely(address > get_limit(regs->cs)))
20047 +#endif
20048 + {
20049 + set_pte(pte, pte_mkread(*pte));
20050 + __flush_tlb_one(address);
20051 + pte_unmap_unlock(pte, ptl);
20052 + up_read(&mm->mmap_sem);
20053 + return 1;
20054 + }
20055 +
20056 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20057 +
20058 + /*
20059 + * PaX: fill DTLB with user rights and retry
20060 + */
20061 + __asm__ __volatile__ (
20062 + "orb %2,(%1)\n"
20063 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20064 +/*
20065 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20066 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20067 + * page fault when examined during a TLB load attempt. this is true not only
20068 + * for PTEs holding a non-present entry but also present entries that will
20069 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20070 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20071 + * for our target pages since their PTEs are simply not in the TLBs at all.
20072 +
20073 + * the best thing in omitting it is that we gain around 15-20% speed in the
20074 + * fast path of the page fault handler and can get rid of tracing since we
20075 + * can no longer flush unintended entries.
20076 + */
20077 + "invlpg (%0)\n"
20078 +#endif
20079 + __copyuser_seg"testb $0,(%0)\n"
20080 + "xorb %3,(%1)\n"
20081 + :
20082 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20083 + : "memory", "cc");
20084 + pte_unmap_unlock(pte, ptl);
20085 + up_read(&mm->mmap_sem);
20086 + return 1;
20087 +}
20088 +#endif
20089 +
20090 /*
20091 * Handle a spurious fault caused by a stale TLB entry.
20092 *
20093 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20094 static inline int
20095 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20096 {
20097 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20098 + return 1;
20099 +
20100 if (write) {
20101 /* write, present and write, not present: */
20102 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20103 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20104 {
20105 struct vm_area_struct *vma;
20106 struct task_struct *tsk;
20107 - unsigned long address;
20108 struct mm_struct *mm;
20109 int write;
20110 int fault;
20111
20112 + /* Get the faulting address: */
20113 + unsigned long address = read_cr2();
20114 +
20115 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20116 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20117 + if (!search_exception_tables(regs->ip)) {
20118 + bad_area_nosemaphore(regs, error_code, address);
20119 + return;
20120 + }
20121 + if (address < PAX_USER_SHADOW_BASE) {
20122 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20123 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20124 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20125 + } else
20126 + address -= PAX_USER_SHADOW_BASE;
20127 + }
20128 +#endif
20129 +
20130 tsk = current;
20131 mm = tsk->mm;
20132
20133 - /* Get the faulting address: */
20134 - address = read_cr2();
20135 -
20136 /*
20137 * Detect and handle instructions that would cause a page fault for
20138 * both a tracked kernel page and a userspace page.
20139 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20140 * User-mode registers count as a user access even for any
20141 * potential system fault or CPU buglet:
20142 */
20143 - if (user_mode_vm(regs)) {
20144 + if (user_mode(regs)) {
20145 local_irq_enable();
20146 error_code |= PF_USER;
20147 } else {
20148 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20149 might_sleep();
20150 }
20151
20152 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20153 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20154 + return;
20155 +#endif
20156 +
20157 vma = find_vma(mm, address);
20158 if (unlikely(!vma)) {
20159 bad_area(regs, error_code, address);
20160 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20161 bad_area(regs, error_code, address);
20162 return;
20163 }
20164 - if (error_code & PF_USER) {
20165 - /*
20166 - * Accessing the stack below %sp is always a bug.
20167 - * The large cushion allows instructions like enter
20168 - * and pusha to work. ("enter $65535, $31" pushes
20169 - * 32 pointers and then decrements %sp by 65535.)
20170 - */
20171 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20172 - bad_area(regs, error_code, address);
20173 - return;
20174 - }
20175 + /*
20176 + * Accessing the stack below %sp is always a bug.
20177 + * The large cushion allows instructions like enter
20178 + * and pusha to work. ("enter $65535, $31" pushes
20179 + * 32 pointers and then decrements %sp by 65535.)
20180 + */
20181 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20182 + bad_area(regs, error_code, address);
20183 + return;
20184 + }
20185 +
20186 +#ifdef CONFIG_PAX_SEGMEXEC
20187 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20188 + bad_area(regs, error_code, address);
20189 + return;
20190 }
20191 +#endif
20192 +
20193 if (unlikely(expand_stack(vma, address))) {
20194 bad_area(regs, error_code, address);
20195 return;
20196 @@ -1146,3 +1416,199 @@ good_area:
20197
20198 up_read(&mm->mmap_sem);
20199 }
20200 +
20201 +#ifdef CONFIG_PAX_EMUTRAMP
20202 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20203 +{
20204 + int err;
20205 +
20206 + do { /* PaX: gcc trampoline emulation #1 */
20207 + unsigned char mov1, mov2;
20208 + unsigned short jmp;
20209 + unsigned int addr1, addr2;
20210 +
20211 +#ifdef CONFIG_X86_64
20212 + if ((regs->ip + 11) >> 32)
20213 + break;
20214 +#endif
20215 +
20216 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20217 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20218 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20219 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20220 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20221 +
20222 + if (err)
20223 + break;
20224 +
20225 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20226 + regs->cx = addr1;
20227 + regs->ax = addr2;
20228 + regs->ip = addr2;
20229 + return 2;
20230 + }
20231 + } while (0);
20232 +
20233 + do { /* PaX: gcc trampoline emulation #2 */
20234 + unsigned char mov, jmp;
20235 + unsigned int addr1, addr2;
20236 +
20237 +#ifdef CONFIG_X86_64
20238 + if ((regs->ip + 9) >> 32)
20239 + break;
20240 +#endif
20241 +
20242 + err = get_user(mov, (unsigned char __user *)regs->ip);
20243 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20244 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20245 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20246 +
20247 + if (err)
20248 + break;
20249 +
20250 + if (mov == 0xB9 && jmp == 0xE9) {
20251 + regs->cx = addr1;
20252 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20253 + return 2;
20254 + }
20255 + } while (0);
20256 +
20257 + return 1; /* PaX in action */
20258 +}
20259 +
20260 +#ifdef CONFIG_X86_64
20261 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20262 +{
20263 + int err;
20264 +
20265 + do { /* PaX: gcc trampoline emulation #1 */
20266 + unsigned short mov1, mov2, jmp1;
20267 + unsigned char jmp2;
20268 + unsigned int addr1;
20269 + unsigned long addr2;
20270 +
20271 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20272 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20273 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20274 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20275 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20276 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20277 +
20278 + if (err)
20279 + break;
20280 +
20281 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20282 + regs->r11 = addr1;
20283 + regs->r10 = addr2;
20284 + regs->ip = addr1;
20285 + return 2;
20286 + }
20287 + } while (0);
20288 +
20289 + do { /* PaX: gcc trampoline emulation #2 */
20290 + unsigned short mov1, mov2, jmp1;
20291 + unsigned char jmp2;
20292 + unsigned long addr1, addr2;
20293 +
20294 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20295 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20296 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20297 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20298 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20299 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20300 +
20301 + if (err)
20302 + break;
20303 +
20304 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20305 + regs->r11 = addr1;
20306 + regs->r10 = addr2;
20307 + regs->ip = addr1;
20308 + return 2;
20309 + }
20310 + } while (0);
20311 +
20312 + return 1; /* PaX in action */
20313 +}
20314 +#endif
20315 +
20316 +/*
20317 + * PaX: decide what to do with offenders (regs->ip = fault address)
20318 + *
20319 + * returns 1 when task should be killed
20320 + * 2 when gcc trampoline was detected
20321 + */
20322 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20323 +{
20324 + if (v8086_mode(regs))
20325 + return 1;
20326 +
20327 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20328 + return 1;
20329 +
20330 +#ifdef CONFIG_X86_32
20331 + return pax_handle_fetch_fault_32(regs);
20332 +#else
20333 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20334 + return pax_handle_fetch_fault_32(regs);
20335 + else
20336 + return pax_handle_fetch_fault_64(regs);
20337 +#endif
20338 +}
20339 +#endif
20340 +
20341 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20342 +void pax_report_insns(void *pc, void *sp)
20343 +{
20344 + long i;
20345 +
20346 + printk(KERN_ERR "PAX: bytes at PC: ");
20347 + for (i = 0; i < 20; i++) {
20348 + unsigned char c;
20349 + if (get_user(c, (__force unsigned char __user *)pc+i))
20350 + printk(KERN_CONT "?? ");
20351 + else
20352 + printk(KERN_CONT "%02x ", c);
20353 + }
20354 + printk("\n");
20355 +
20356 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20357 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20358 + unsigned long c;
20359 + if (get_user(c, (__force unsigned long __user *)sp+i))
20360 +#ifdef CONFIG_X86_32
20361 + printk(KERN_CONT "???????? ");
20362 +#else
20363 + printk(KERN_CONT "???????????????? ");
20364 +#endif
20365 + else
20366 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20367 + }
20368 + printk("\n");
20369 +}
20370 +#endif
20371 +
20372 +/**
20373 + * probe_kernel_write(): safely attempt to write to a location
20374 + * @dst: address to write to
20375 + * @src: pointer to the data that shall be written
20376 + * @size: size of the data chunk
20377 + *
20378 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20379 + * happens, handle that and return -EFAULT.
20380 + */
20381 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20382 +{
20383 + long ret;
20384 + mm_segment_t old_fs = get_fs();
20385 +
20386 + set_fs(KERNEL_DS);
20387 + pagefault_disable();
20388 + pax_open_kernel();
20389 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20390 + pax_close_kernel();
20391 + pagefault_enable();
20392 + set_fs(old_fs);
20393 +
20394 + return ret ? -EFAULT : 0;
20395 +}
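
The EMUTRAMP helpers above recognise the fixed byte sequences gcc emits for nested-function trampolines and emulate them instead of letting the instruction-fetch fault kill the task; for example, 32-bit trampoline #2 is `movl $imm32,%ecx` (0xB9) followed by `jmp rel32` (0xE9), with the jump target taken relative to the end of the ten-byte sequence. A standalone sketch of that pattern match, reading from a plain little-endian byte buffer instead of via get_user() (match_tramp2 and struct tramp2 are made-up names):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct tramp2 { uint32_t ecx; uint32_t target; };

/* Returns true and fills *out when buf[0..9] matches gcc's i386
 * trampoline #2: B9 <imm32> E9 <rel32>.  The target computation mirrors
 * the "regs->ip + addr2 + 10" in the hunk above. */
static bool match_tramp2(const uint8_t *buf, uint32_t ip, struct tramp2 *out)
{
	uint32_t imm, rel;

	if (buf[0] != 0xB9 || buf[5] != 0xE9)
		return false;
	memcpy(&imm, buf + 1, sizeof(imm));   /* little-endian bytes, as on x86 */
	memcpy(&rel, buf + 6, sizeof(rel));
	out->ecx = imm;
	out->target = ip + 10 + rel;
	return true;
}
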
20396 diff -urNp linux-2.6.32.41/arch/x86/mm/gup.c linux-2.6.32.41/arch/x86/mm/gup.c
20397 --- linux-2.6.32.41/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20398 +++ linux-2.6.32.41/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20399 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20400 addr = start;
20401 len = (unsigned long) nr_pages << PAGE_SHIFT;
20402 end = start + len;
20403 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20404 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20405 (void __user *)start, len)))
20406 return 0;
20407
20408 diff -urNp linux-2.6.32.41/arch/x86/mm/highmem_32.c linux-2.6.32.41/arch/x86/mm/highmem_32.c
20409 --- linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20410 +++ linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20411 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20412 idx = type + KM_TYPE_NR*smp_processor_id();
20413 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20414 BUG_ON(!pte_none(*(kmap_pte-idx)));
20415 +
20416 + pax_open_kernel();
20417 set_pte(kmap_pte-idx, mk_pte(page, prot));
20418 + pax_close_kernel();
20419
20420 return (void *)vaddr;
20421 }
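
kmap_atomic_prot() above is one of many call sites where the patch brackets a page-table update with pax_open_kernel()/pax_close_kernel(), so KERNEXEC can keep those tables read-only the rest of the time. Reduced to its shape, the bracket looks like the sketch below; the open/close functions here are trivial stand-ins, whereas the real helpers typically toggle the CPU's write-protect state or switch to writable mappings.

/* Trivial stand-ins for the real open/close helpers. */
static unsigned long sketch_open_kernel(void) { return 0; /* saved state */ }
static void sketch_close_kernel(unsigned long saved) { (void)saved; }

/* The protected store is only performed inside the open/close window. */
#define SET_PROTECTED(lvalue, value)				\
do {								\
	unsigned long __saved = sketch_open_kernel();		\
	(lvalue) = (value);					\
	sketch_close_kernel(__saved);				\
} while (0)
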
20422 diff -urNp linux-2.6.32.41/arch/x86/mm/hugetlbpage.c linux-2.6.32.41/arch/x86/mm/hugetlbpage.c
20423 --- linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20424 +++ linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20425 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20426 struct hstate *h = hstate_file(file);
20427 struct mm_struct *mm = current->mm;
20428 struct vm_area_struct *vma;
20429 - unsigned long start_addr;
20430 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20431 +
20432 +#ifdef CONFIG_PAX_SEGMEXEC
20433 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20434 + pax_task_size = SEGMEXEC_TASK_SIZE;
20435 +#endif
20436 +
20437 + pax_task_size -= PAGE_SIZE;
20438
20439 if (len > mm->cached_hole_size) {
20440 - start_addr = mm->free_area_cache;
20441 + start_addr = mm->free_area_cache;
20442 } else {
20443 - start_addr = TASK_UNMAPPED_BASE;
20444 - mm->cached_hole_size = 0;
20445 + start_addr = mm->mmap_base;
20446 + mm->cached_hole_size = 0;
20447 }
20448
20449 full_search:
20450 @@ -281,26 +288,27 @@ full_search:
20451
20452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20453 /* At this point: (!vma || addr < vma->vm_end). */
20454 - if (TASK_SIZE - len < addr) {
20455 + if (pax_task_size - len < addr) {
20456 /*
20457 * Start a new search - just in case we missed
20458 * some holes.
20459 */
20460 - if (start_addr != TASK_UNMAPPED_BASE) {
20461 - start_addr = TASK_UNMAPPED_BASE;
20462 + if (start_addr != mm->mmap_base) {
20463 + start_addr = mm->mmap_base;
20464 mm->cached_hole_size = 0;
20465 goto full_search;
20466 }
20467 return -ENOMEM;
20468 }
20469 - if (!vma || addr + len <= vma->vm_start) {
20470 - mm->free_area_cache = addr + len;
20471 - return addr;
20472 - }
20473 + if (check_heap_stack_gap(vma, addr, len))
20474 + break;
20475 if (addr + mm->cached_hole_size < vma->vm_start)
20476 mm->cached_hole_size = vma->vm_start - addr;
20477 addr = ALIGN(vma->vm_end, huge_page_size(h));
20478 }
20479 +
20480 + mm->free_area_cache = addr + len;
20481 + return addr;
20482 }
20483
20484 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20485 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20486 {
20487 struct hstate *h = hstate_file(file);
20488 struct mm_struct *mm = current->mm;
20489 - struct vm_area_struct *vma, *prev_vma;
20490 - unsigned long base = mm->mmap_base, addr = addr0;
20491 + struct vm_area_struct *vma;
20492 + unsigned long base = mm->mmap_base, addr;
20493 unsigned long largest_hole = mm->cached_hole_size;
20494 - int first_time = 1;
20495
20496 /* don't allow allocations above current base */
20497 if (mm->free_area_cache > base)
20498 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20499 largest_hole = 0;
20500 mm->free_area_cache = base;
20501 }
20502 -try_again:
20503 +
20504 /* make sure it can fit in the remaining address space */
20505 if (mm->free_area_cache < len)
20506 goto fail;
20507
20508 /* either no address requested or cant fit in requested address hole */
20509 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20510 + addr = (mm->free_area_cache - len);
20511 do {
20512 + addr &= huge_page_mask(h);
20513 + vma = find_vma(mm, addr);
20514 /*
20515 * Lookup failure means no vma is above this address,
20516 * i.e. return with success:
20517 - */
20518 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20519 - return addr;
20520 -
20521 - /*
20522 * new region fits between prev_vma->vm_end and
20523 * vma->vm_start, use it:
20524 */
20525 - if (addr + len <= vma->vm_start &&
20526 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20527 + if (check_heap_stack_gap(vma, addr, len)) {
20528 /* remember the address as a hint for next time */
20529 - mm->cached_hole_size = largest_hole;
20530 - return (mm->free_area_cache = addr);
20531 - } else {
20532 - /* pull free_area_cache down to the first hole */
20533 - if (mm->free_area_cache == vma->vm_end) {
20534 - mm->free_area_cache = vma->vm_start;
20535 - mm->cached_hole_size = largest_hole;
20536 - }
20537 + mm->cached_hole_size = largest_hole;
20538 + return (mm->free_area_cache = addr);
20539 + }
20540 + /* pull free_area_cache down to the first hole */
20541 + if (mm->free_area_cache == vma->vm_end) {
20542 + mm->free_area_cache = vma->vm_start;
20543 + mm->cached_hole_size = largest_hole;
20544 }
20545
20546 /* remember the largest hole we saw so far */
20547 if (addr + largest_hole < vma->vm_start)
20548 - largest_hole = vma->vm_start - addr;
20549 + largest_hole = vma->vm_start - addr;
20550
20551 /* try just below the current vma->vm_start */
20552 - addr = (vma->vm_start - len) & huge_page_mask(h);
20553 - } while (len <= vma->vm_start);
20554 + addr = skip_heap_stack_gap(vma, len);
20555 + } while (!IS_ERR_VALUE(addr));
20556
20557 fail:
20558 /*
20559 - * if hint left us with no space for the requested
20560 - * mapping then try again:
20561 - */
20562 - if (first_time) {
20563 - mm->free_area_cache = base;
20564 - largest_hole = 0;
20565 - first_time = 0;
20566 - goto try_again;
20567 - }
20568 - /*
20569 * A failed mmap() very likely causes application failure,
20570 * so fall back to the bottom-up function here. This scenario
20571 * can happen with large stack limits and large mmap()
20572 * allocations.
20573 */
20574 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20575 +
20576 +#ifdef CONFIG_PAX_SEGMEXEC
20577 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20578 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20579 + else
20580 +#endif
20581 +
20582 + mm->mmap_base = TASK_UNMAPPED_BASE;
20583 +
20584 +#ifdef CONFIG_PAX_RANDMMAP
20585 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20586 + mm->mmap_base += mm->delta_mmap;
20587 +#endif
20588 +
20589 + mm->free_area_cache = mm->mmap_base;
20590 mm->cached_hole_size = ~0UL;
20591 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20592 len, pgoff, flags);
20593 @@ -387,6 +393,7 @@ fail:
20594 /*
20595 * Restore the topdown base:
20596 */
20597 + mm->mmap_base = base;
20598 mm->free_area_cache = base;
20599 mm->cached_hole_size = ~0UL;
20600
20601 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20602 struct hstate *h = hstate_file(file);
20603 struct mm_struct *mm = current->mm;
20604 struct vm_area_struct *vma;
20605 + unsigned long pax_task_size = TASK_SIZE;
20606
20607 if (len & ~huge_page_mask(h))
20608 return -EINVAL;
20609 - if (len > TASK_SIZE)
20610 +
20611 +#ifdef CONFIG_PAX_SEGMEXEC
20612 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20613 + pax_task_size = SEGMEXEC_TASK_SIZE;
20614 +#endif
20615 +
20616 + pax_task_size -= PAGE_SIZE;
20617 +
20618 + if (len > pax_task_size)
20619 return -ENOMEM;
20620
20621 if (flags & MAP_FIXED) {
20622 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20623 if (addr) {
20624 addr = ALIGN(addr, huge_page_size(h));
20625 vma = find_vma(mm, addr);
20626 - if (TASK_SIZE - len >= addr &&
20627 - (!vma || addr + len <= vma->vm_start))
20628 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20629 return addr;
20630 }
20631 if (mm->get_unmapped_area == arch_get_unmapped_area)
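
Throughout these hugetlbpage.c hunks the open-coded `!vma || addr + len <= vma->vm_start` test is replaced by check_heap_stack_gap(), which additionally refuses candidates that would sit flush against a downward-growing stack. Its body is not part of this excerpt, so the sketch below only illustrates the general shape of such a check; the structure, flag value and guard size are assumptions, not the patch's definitions.

#include <stdbool.h>

struct vma_sketch {
	unsigned long vm_start, vm_end, vm_flags;
};

#define SKETCH_VM_GROWSDOWN 0x0100UL   /* stand-in flag value */
#define SKETCH_GUARD_GAP    0x10000UL  /* assumed guard size */

static bool gap_ok_sketch(const struct vma_sketch *vma,
			  unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;                      /* nothing above: it fits */
	if (addr + len > vma->vm_start)
		return false;                     /* overlaps the next mapping */
	if (vma->vm_flags & SKETCH_VM_GROWSDOWN)  /* keep a gap below a stack */
		return addr + len + SKETCH_GUARD_GAP <= vma->vm_start;
	return true;
}
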
20632 diff -urNp linux-2.6.32.41/arch/x86/mm/init_32.c linux-2.6.32.41/arch/x86/mm/init_32.c
20633 --- linux-2.6.32.41/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20634 +++ linux-2.6.32.41/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20635 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20636 }
20637
20638 /*
20639 - * Creates a middle page table and puts a pointer to it in the
20640 - * given global directory entry. This only returns the gd entry
20641 - * in non-PAE compilation mode, since the middle layer is folded.
20642 - */
20643 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20644 -{
20645 - pud_t *pud;
20646 - pmd_t *pmd_table;
20647 -
20648 -#ifdef CONFIG_X86_PAE
20649 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20650 - if (after_bootmem)
20651 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20652 - else
20653 - pmd_table = (pmd_t *)alloc_low_page();
20654 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20655 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20656 - pud = pud_offset(pgd, 0);
20657 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20658 -
20659 - return pmd_table;
20660 - }
20661 -#endif
20662 - pud = pud_offset(pgd, 0);
20663 - pmd_table = pmd_offset(pud, 0);
20664 -
20665 - return pmd_table;
20666 -}
20667 -
20668 -/*
20669 * Create a page table and place a pointer to it in a middle page
20670 * directory entry:
20671 */
20672 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20673 page_table = (pte_t *)alloc_low_page();
20674
20675 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20676 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20677 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20678 +#else
20679 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20680 +#endif
20681 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20682 }
20683
20684 return pte_offset_kernel(pmd, 0);
20685 }
20686
20687 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20688 +{
20689 + pud_t *pud;
20690 + pmd_t *pmd_table;
20691 +
20692 + pud = pud_offset(pgd, 0);
20693 + pmd_table = pmd_offset(pud, 0);
20694 +
20695 + return pmd_table;
20696 +}
20697 +
20698 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20699 {
20700 int pgd_idx = pgd_index(vaddr);
20701 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20702 int pgd_idx, pmd_idx;
20703 unsigned long vaddr;
20704 pgd_t *pgd;
20705 + pud_t *pud;
20706 pmd_t *pmd;
20707 pte_t *pte = NULL;
20708
20709 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20710 pgd = pgd_base + pgd_idx;
20711
20712 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20713 - pmd = one_md_table_init(pgd);
20714 - pmd = pmd + pmd_index(vaddr);
20715 + pud = pud_offset(pgd, vaddr);
20716 + pmd = pmd_offset(pud, vaddr);
20717 +
20718 +#ifdef CONFIG_X86_PAE
20719 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20720 +#endif
20721 +
20722 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20723 pmd++, pmd_idx++) {
20724 pte = page_table_kmap_check(one_page_table_init(pmd),
20725 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20726 }
20727 }
20728
20729 -static inline int is_kernel_text(unsigned long addr)
20730 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20731 {
20732 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20733 - return 1;
20734 - return 0;
20735 + if ((start > ktla_ktva((unsigned long)_etext) ||
20736 + end <= ktla_ktva((unsigned long)_stext)) &&
20737 + (start > ktla_ktva((unsigned long)_einittext) ||
20738 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20739 +
20740 +#ifdef CONFIG_ACPI_SLEEP
20741 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20742 +#endif
20743 +
20744 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20745 + return 0;
20746 + return 1;
20747 }
20748
20749 /*
20750 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20751 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20752 unsigned long start_pfn, end_pfn;
20753 pgd_t *pgd_base = swapper_pg_dir;
20754 - int pgd_idx, pmd_idx, pte_ofs;
20755 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20756 unsigned long pfn;
20757 pgd_t *pgd;
20758 + pud_t *pud;
20759 pmd_t *pmd;
20760 pte_t *pte;
20761 unsigned pages_2m, pages_4k;
20762 @@ -278,8 +279,13 @@ repeat:
20763 pfn = start_pfn;
20764 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20765 pgd = pgd_base + pgd_idx;
20766 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20767 - pmd = one_md_table_init(pgd);
20768 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20769 + pud = pud_offset(pgd, 0);
20770 + pmd = pmd_offset(pud, 0);
20771 +
20772 +#ifdef CONFIG_X86_PAE
20773 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20774 +#endif
20775
20776 if (pfn >= end_pfn)
20777 continue;
20778 @@ -291,14 +297,13 @@ repeat:
20779 #endif
20780 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20781 pmd++, pmd_idx++) {
20782 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20783 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20784
20785 /*
20786 * Map with big pages if possible, otherwise
20787 * create normal page tables:
20788 */
20789 if (use_pse) {
20790 - unsigned int addr2;
20791 pgprot_t prot = PAGE_KERNEL_LARGE;
20792 /*
20793 * first pass will use the same initial
20794 @@ -308,11 +313,7 @@ repeat:
20795 __pgprot(PTE_IDENT_ATTR |
20796 _PAGE_PSE);
20797
20798 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20799 - PAGE_OFFSET + PAGE_SIZE-1;
20800 -
20801 - if (is_kernel_text(addr) ||
20802 - is_kernel_text(addr2))
20803 + if (is_kernel_text(address, address + PMD_SIZE))
20804 prot = PAGE_KERNEL_LARGE_EXEC;
20805
20806 pages_2m++;
20807 @@ -329,7 +330,7 @@ repeat:
20808 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20809 pte += pte_ofs;
20810 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20811 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20812 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20813 pgprot_t prot = PAGE_KERNEL;
20814 /*
20815 * first pass will use the same initial
20816 @@ -337,7 +338,7 @@ repeat:
20817 */
20818 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20819
20820 - if (is_kernel_text(addr))
20821 + if (is_kernel_text(address, address + PAGE_SIZE))
20822 prot = PAGE_KERNEL_EXEC;
20823
20824 pages_4k++;
20825 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20826
20827 pud = pud_offset(pgd, va);
20828 pmd = pmd_offset(pud, va);
20829 - if (!pmd_present(*pmd))
20830 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20831 break;
20832
20833 pte = pte_offset_kernel(pmd, va);
20834 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20835
20836 static void __init pagetable_init(void)
20837 {
20838 - pgd_t *pgd_base = swapper_pg_dir;
20839 -
20840 - permanent_kmaps_init(pgd_base);
20841 + permanent_kmaps_init(swapper_pg_dir);
20842 }
20843
20844 #ifdef CONFIG_ACPI_SLEEP
20845 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20846 * ACPI suspend needs this for resume, because things like the intel-agp
20847 * driver might have split up a kernel 4MB mapping.
20848 */
20849 -char swsusp_pg_dir[PAGE_SIZE]
20850 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20851 __attribute__ ((aligned(PAGE_SIZE)));
20852
20853 static inline void save_pg_dir(void)
20854 {
20855 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20856 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20857 }
20858 #else /* !CONFIG_ACPI_SLEEP */
20859 static inline void save_pg_dir(void)
20860 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20861 flush_tlb_all();
20862 }
20863
20864 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20865 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20866 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20867
20868 /* user-defined highmem size */
20869 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
20870 * Initialize the boot-time allocator (with low memory only):
20871 */
20872 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
20873 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20874 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20875 PAGE_SIZE);
20876 if (bootmap == -1L)
20877 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
20878 @@ -864,6 +863,12 @@ void __init mem_init(void)
20879
20880 pci_iommu_alloc();
20881
20882 +#ifdef CONFIG_PAX_PER_CPU_PGD
20883 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20884 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20885 + KERNEL_PGD_PTRS);
20886 +#endif
20887 +
20888 #ifdef CONFIG_FLATMEM
20889 BUG_ON(!mem_map);
20890 #endif
20891 @@ -881,7 +886,7 @@ void __init mem_init(void)
20892 set_highmem_pages_init();
20893
20894 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20895 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20896 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20897 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20898
20899 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20900 @@ -923,10 +928,10 @@ void __init mem_init(void)
20901 ((unsigned long)&__init_end -
20902 (unsigned long)&__init_begin) >> 10,
20903
20904 - (unsigned long)&_etext, (unsigned long)&_edata,
20905 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20906 + (unsigned long)&_sdata, (unsigned long)&_edata,
20907 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20908
20909 - (unsigned long)&_text, (unsigned long)&_etext,
20910 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20911 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20912
20913 /*
20914 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
20915 if (!kernel_set_to_readonly)
20916 return;
20917
20918 + start = ktla_ktva(start);
20919 pr_debug("Set kernel text: %lx - %lx for read write\n",
20920 start, start+size);
20921
20922 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
20923 if (!kernel_set_to_readonly)
20924 return;
20925
20926 + start = ktla_ktva(start);
20927 pr_debug("Set kernel text: %lx - %lx for read only\n",
20928 start, start+size);
20929
20930 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
20931 unsigned long start = PFN_ALIGN(_text);
20932 unsigned long size = PFN_ALIGN(_etext) - start;
20933
20934 + start = ktla_ktva(start);
20935 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20936 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20937 size >> 10);
20938 diff -urNp linux-2.6.32.41/arch/x86/mm/init_64.c linux-2.6.32.41/arch/x86/mm/init_64.c
20939 --- linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
20940 +++ linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
20941 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20942 pmd = fill_pmd(pud, vaddr);
20943 pte = fill_pte(pmd, vaddr);
20944
20945 + pax_open_kernel();
20946 set_pte(pte, new_pte);
20947 + pax_close_kernel();
20948
20949 /*
20950 * It's enough to flush this one mapping.
20951 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
20952 pgd = pgd_offset_k((unsigned long)__va(phys));
20953 if (pgd_none(*pgd)) {
20954 pud = (pud_t *) spp_getpage();
20955 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20956 - _PAGE_USER));
20957 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20958 }
20959 pud = pud_offset(pgd, (unsigned long)__va(phys));
20960 if (pud_none(*pud)) {
20961 pmd = (pmd_t *) spp_getpage();
20962 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20963 - _PAGE_USER));
20964 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20965 }
20966 pmd = pmd_offset(pud, phys);
20967 BUG_ON(!pmd_none(*pmd));
20968 @@ -675,6 +675,12 @@ void __init mem_init(void)
20969
20970 pci_iommu_alloc();
20971
20972 +#ifdef CONFIG_PAX_PER_CPU_PGD
20973 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20974 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20975 + KERNEL_PGD_PTRS);
20976 +#endif
20977 +
20978 /* clear_bss() already clear the empty_zero_page */
20979
20980 reservedpages = 0;
20981 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
20982 static struct vm_area_struct gate_vma = {
20983 .vm_start = VSYSCALL_START,
20984 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20985 - .vm_page_prot = PAGE_READONLY_EXEC,
20986 - .vm_flags = VM_READ | VM_EXEC
20987 + .vm_page_prot = PAGE_READONLY,
20988 + .vm_flags = VM_READ
20989 };
20990
20991 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
20992 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
20993
20994 const char *arch_vma_name(struct vm_area_struct *vma)
20995 {
20996 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20997 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20998 return "[vdso]";
20999 if (vma == &gate_vma)
21000 return "[vsyscall]";
21001 diff -urNp linux-2.6.32.41/arch/x86/mm/init.c linux-2.6.32.41/arch/x86/mm/init.c
21002 --- linux-2.6.32.41/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21003 +++ linux-2.6.32.41/arch/x86/mm/init.c 2011-05-23 19:02:20.000000000 -0400
21004 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21005 * cause a hotspot and fill up ZONE_DMA. The page tables
21006 * need roughly 0.5KB per GB.
21007 */
21008 -#ifdef CONFIG_X86_32
21009 - start = 0x7000;
21010 -#else
21011 - start = 0x8000;
21012 -#endif
21013 + start = 0x100000;
21014 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21015 tables, PAGE_SIZE);
21016 if (e820_table_start == -1UL)
21017 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21018 #endif
21019
21020 set_nx();
21021 - if (nx_enabled)
21022 + if (nx_enabled && cpu_has_nx)
21023 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21024
21025 /* Enable PSE if available */
21026 @@ -331,7 +327,19 @@ unsigned long __init_refok init_memory_m
21027 */
21028 int devmem_is_allowed(unsigned long pagenr)
21029 {
21030 - if (pagenr <= 256)
21031 +#ifndef CONFIG_GRKERNSEC_KMEM
21032 + if (!pagenr)
21033 + return 1;
21034 +#ifdef CONFIG_VM86
21035 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21036 + return 1;
21037 +#endif
21038 +#else
21039 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21040 + return 0;
21041 +#endif
21042 +
21043 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21044 return 1;
21045 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21046 return 0;
21047 @@ -379,6 +387,86 @@ void free_init_pages(char *what, unsigne
21048
21049 void free_initmem(void)
21050 {
21051 +
21052 +#ifdef CONFIG_PAX_KERNEXEC
21053 +#ifdef CONFIG_X86_32
21054 + /* PaX: limit KERNEL_CS to actual size */
21055 + unsigned long addr, limit;
21056 + struct desc_struct d;
21057 + int cpu;
21058 +
21059 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21060 + limit = (limit - 1UL) >> PAGE_SHIFT;
21061 +
21062 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21063 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21064 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21065 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21066 + }
21067 +
21068 + /* PaX: make KERNEL_CS read-only */
21069 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21070 + if (!paravirt_enabled())
21071 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21072 +/*
21073 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21074 + pgd = pgd_offset_k(addr);
21075 + pud = pud_offset(pgd, addr);
21076 + pmd = pmd_offset(pud, addr);
21077 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21078 + }
21079 +*/
21080 +#ifdef CONFIG_X86_PAE
21081 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21082 +/*
21083 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21084 + pgd = pgd_offset_k(addr);
21085 + pud = pud_offset(pgd, addr);
21086 + pmd = pmd_offset(pud, addr);
21087 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21088 + }
21089 +*/
21090 +#endif
21091 +
21092 +#ifdef CONFIG_MODULES
21093 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21094 +#endif
21095 +
21096 +#else
21097 + pgd_t *pgd;
21098 + pud_t *pud;
21099 + pmd_t *pmd;
21100 + unsigned long addr, end;
21101 +
21102 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21103 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21104 + pgd = pgd_offset_k(addr);
21105 + pud = pud_offset(pgd, addr);
21106 + pmd = pmd_offset(pud, addr);
21107 + if (!pmd_present(*pmd))
21108 + continue;
21109 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21110 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21111 + else
21112 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21113 + }
21114 +
21115 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21116 + end = addr + KERNEL_IMAGE_SIZE;
21117 + for (; addr < end; addr += PMD_SIZE) {
21118 + pgd = pgd_offset_k(addr);
21119 + pud = pud_offset(pgd, addr);
21120 + pmd = pmd_offset(pud, addr);
21121 + if (!pmd_present(*pmd))
21122 + continue;
21123 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21124 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21125 + }
21126 +#endif
21127 +
21128 + flush_tlb_all();
21129 +#endif
21130 +
21131 free_init_pages("unused kernel memory",
21132 (unsigned long)(&__init_begin),
21133 (unsigned long)(&__init_end));
21134 diff -urNp linux-2.6.32.41/arch/x86/mm/iomap_32.c linux-2.6.32.41/arch/x86/mm/iomap_32.c
21135 --- linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21136 +++ linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21137 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21138 debug_kmap_atomic(type);
21139 idx = type + KM_TYPE_NR * smp_processor_id();
21140 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21141 +
21142 + pax_open_kernel();
21143 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21144 + pax_close_kernel();
21145 +
21146 arch_flush_lazy_mmu_mode();
21147
21148 return (void *)vaddr;
21149 diff -urNp linux-2.6.32.41/arch/x86/mm/ioremap.c linux-2.6.32.41/arch/x86/mm/ioremap.c
21150 --- linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21151 +++ linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21152 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21153 * Second special case: Some BIOSen report the PC BIOS
21154 * area (640->1Mb) as ram even though it is not.
21155 */
21156 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21157 - pagenr < (BIOS_END >> PAGE_SHIFT))
21158 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21159 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21160 return 0;
21161
21162 for (i = 0; i < e820.nr_map; i++) {
21163 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21164 /*
21165 * Don't allow anybody to remap normal RAM that we're using..
21166 */
21167 - for (pfn = phys_addr >> PAGE_SHIFT;
21168 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21169 - pfn++) {
21170 -
21171 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21172 int is_ram = page_is_ram(pfn);
21173
21174 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21175 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21176 return NULL;
21177 WARN_ON_ONCE(is_ram);
21178 }
21179 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21180 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21181
21182 static __initdata int after_paging_init;
21183 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21184 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21185
21186 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21187 {
21188 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21189 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21190
21191 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21192 - memset(bm_pte, 0, sizeof(bm_pte));
21193 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21194 + pmd_populate_user(&init_mm, pmd, bm_pte);
21195
21196 /*
21197 * The boot-ioremap range spans multiple pmds, for which
21198 diff -urNp linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c
21199 --- linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21200 +++ linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21201 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21202 * memory (e.g. tracked pages)? For now, we need this to avoid
21203 * invoking kmemcheck for PnP BIOS calls.
21204 */
21205 - if (regs->flags & X86_VM_MASK)
21206 + if (v8086_mode(regs))
21207 return false;
21208 - if (regs->cs != __KERNEL_CS)
21209 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21210 return false;
21211
21212 pte = kmemcheck_pte_lookup(address);
21213 diff -urNp linux-2.6.32.41/arch/x86/mm/mmap.c linux-2.6.32.41/arch/x86/mm/mmap.c
21214 --- linux-2.6.32.41/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21215 +++ linux-2.6.32.41/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21216 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21217 * Leave an at least ~128 MB hole with possible stack randomization.
21218 */
21219 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21220 -#define MAX_GAP (TASK_SIZE/6*5)
21221 +#define MAX_GAP (pax_task_size/6*5)
21222
21223 /*
21224 * True on X86_32 or when emulating IA32 on X86_64
21225 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21226 return rnd << PAGE_SHIFT;
21227 }
21228
21229 -static unsigned long mmap_base(void)
21230 +static unsigned long mmap_base(struct mm_struct *mm)
21231 {
21232 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21233 + unsigned long pax_task_size = TASK_SIZE;
21234 +
21235 +#ifdef CONFIG_PAX_SEGMEXEC
21236 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21237 + pax_task_size = SEGMEXEC_TASK_SIZE;
21238 +#endif
21239
21240 if (gap < MIN_GAP)
21241 gap = MIN_GAP;
21242 else if (gap > MAX_GAP)
21243 gap = MAX_GAP;
21244
21245 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21246 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21247 }
21248
21249 /*
21250 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21251 * does, but not when emulating X86_32
21252 */
21253 -static unsigned long mmap_legacy_base(void)
21254 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21255 {
21256 - if (mmap_is_ia32())
21257 + if (mmap_is_ia32()) {
21258 +
21259 +#ifdef CONFIG_PAX_SEGMEXEC
21260 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21261 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21262 + else
21263 +#endif
21264 +
21265 return TASK_UNMAPPED_BASE;
21266 - else
21267 + } else
21268 return TASK_UNMAPPED_BASE + mmap_rnd();
21269 }
21270
21271 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21272 void arch_pick_mmap_layout(struct mm_struct *mm)
21273 {
21274 if (mmap_is_legacy()) {
21275 - mm->mmap_base = mmap_legacy_base();
21276 + mm->mmap_base = mmap_legacy_base(mm);
21277 +
21278 +#ifdef CONFIG_PAX_RANDMMAP
21279 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21280 + mm->mmap_base += mm->delta_mmap;
21281 +#endif
21282 +
21283 mm->get_unmapped_area = arch_get_unmapped_area;
21284 mm->unmap_area = arch_unmap_area;
21285 } else {
21286 - mm->mmap_base = mmap_base();
21287 + mm->mmap_base = mmap_base(mm);
21288 +
21289 +#ifdef CONFIG_PAX_RANDMMAP
21290 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21291 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21292 +#endif
21293 +
21294 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21295 mm->unmap_area = arch_unmap_area_topdown;
21296 }
21297 diff -urNp linux-2.6.32.41/arch/x86/mm/mmio-mod.c linux-2.6.32.41/arch/x86/mm/mmio-mod.c
21298 --- linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21299 +++ linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21300 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21301 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21302 void __iomem *addr)
21303 {
21304 - static atomic_t next_id;
21305 + static atomic_unchecked_t next_id;
21306 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21307 /* These are page-unaligned. */
21308 struct mmiotrace_map map = {
21309 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21310 .private = trace
21311 },
21312 .phys = offset,
21313 - .id = atomic_inc_return(&next_id)
21314 + .id = atomic_inc_return_unchecked(&next_id)
21315 };
21316 map.map_id = trace->id;
21317
21318 diff -urNp linux-2.6.32.41/arch/x86/mm/numa_32.c linux-2.6.32.41/arch/x86/mm/numa_32.c
21319 --- linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21320 +++ linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21321 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21322 }
21323 #endif
21324
21325 -extern unsigned long find_max_low_pfn(void);
21326 extern unsigned long highend_pfn, highstart_pfn;
21327
21328 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21329 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr.c linux-2.6.32.41/arch/x86/mm/pageattr.c
21330 --- linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21331 +++ linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21332 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21333 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21334 */
21335 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21336 - pgprot_val(forbidden) |= _PAGE_NX;
21337 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21338
21339 /*
21340 * The kernel text needs to be executable for obvious reasons
21341 * Does not cover __inittext since that is gone later on. On
21342 * 64bit we do not enforce !NX on the low mapping
21343 */
21344 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21345 - pgprot_val(forbidden) |= _PAGE_NX;
21346 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21347 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21348
21349 +#ifdef CONFIG_DEBUG_RODATA
21350 /*
21351 * The .rodata section needs to be read-only. Using the pfn
21352 * catches all aliases.
21353 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21354 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21355 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21356 pgprot_val(forbidden) |= _PAGE_RW;
21357 +#endif
21358 +
21359 +#ifdef CONFIG_PAX_KERNEXEC
21360 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21361 + pgprot_val(forbidden) |= _PAGE_RW;
21362 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21363 + }
21364 +#endif
21365
21366 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21367
21368 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21369 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21370 {
21371 /* change init_mm */
21372 + pax_open_kernel();
21373 set_pte_atomic(kpte, pte);
21374 +
21375 #ifdef CONFIG_X86_32
21376 if (!SHARED_KERNEL_PMD) {
21377 +
21378 +#ifdef CONFIG_PAX_PER_CPU_PGD
21379 + unsigned long cpu;
21380 +#else
21381 struct page *page;
21382 +#endif
21383
21384 +#ifdef CONFIG_PAX_PER_CPU_PGD
21385 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21386 + pgd_t *pgd = get_cpu_pgd(cpu);
21387 +#else
21388 list_for_each_entry(page, &pgd_list, lru) {
21389 - pgd_t *pgd;
21390 + pgd_t *pgd = (pgd_t *)page_address(page);
21391 +#endif
21392 +
21393 pud_t *pud;
21394 pmd_t *pmd;
21395
21396 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21397 + pgd += pgd_index(address);
21398 pud = pud_offset(pgd, address);
21399 pmd = pmd_offset(pud, address);
21400 set_pte_atomic((pte_t *)pmd, pte);
21401 }
21402 }
21403 #endif
21404 + pax_close_kernel();
21405 }
21406
21407 static int
21408 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr-test.c linux-2.6.32.41/arch/x86/mm/pageattr-test.c
21409 --- linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21410 +++ linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21411 @@ -36,7 +36,7 @@ enum {
21412
21413 static int pte_testbit(pte_t pte)
21414 {
21415 - return pte_flags(pte) & _PAGE_UNUSED1;
21416 + return pte_flags(pte) & _PAGE_CPA_TEST;
21417 }
21418
21419 struct split_state {
21420 diff -urNp linux-2.6.32.41/arch/x86/mm/pat.c linux-2.6.32.41/arch/x86/mm/pat.c
21421 --- linux-2.6.32.41/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21422 +++ linux-2.6.32.41/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21423 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21424
21425 conflict:
21426 printk(KERN_INFO "%s:%d conflicting memory types "
21427 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21428 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21429 new->end, cattr_name(new->type), cattr_name(entry->type));
21430 return -EBUSY;
21431 }
21432 @@ -559,7 +559,7 @@ unlock_ret:
21433
21434 if (err) {
21435 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21436 - current->comm, current->pid, start, end);
21437 + current->comm, task_pid_nr(current), start, end);
21438 }
21439
21440 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21441 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21442 while (cursor < to) {
21443 if (!devmem_is_allowed(pfn)) {
21444 printk(KERN_INFO
21445 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21446 - current->comm, from, to);
21447 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21448 + current->comm, from, to, cursor);
21449 return 0;
21450 }
21451 cursor += PAGE_SIZE;
21452 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21453 printk(KERN_INFO
21454 "%s:%d ioremap_change_attr failed %s "
21455 "for %Lx-%Lx\n",
21456 - current->comm, current->pid,
21457 + current->comm, task_pid_nr(current),
21458 cattr_name(flags),
21459 base, (unsigned long long)(base + size));
21460 return -EINVAL;
21461 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21462 free_memtype(paddr, paddr + size);
21463 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21464 " for %Lx-%Lx, got %s\n",
21465 - current->comm, current->pid,
21466 + current->comm, task_pid_nr(current),
21467 cattr_name(want_flags),
21468 (unsigned long long)paddr,
21469 (unsigned long long)(paddr + size),
21470 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable_32.c linux-2.6.32.41/arch/x86/mm/pgtable_32.c
21471 --- linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21472 +++ linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21473 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21474 return;
21475 }
21476 pte = pte_offset_kernel(pmd, vaddr);
21477 +
21478 + pax_open_kernel();
21479 if (pte_val(pteval))
21480 set_pte_at(&init_mm, vaddr, pte, pteval);
21481 else
21482 pte_clear(&init_mm, vaddr, pte);
21483 + pax_close_kernel();
21484
21485 /*
21486 * It's enough to flush this one mapping.
21487 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable.c linux-2.6.32.41/arch/x86/mm/pgtable.c
21488 --- linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21489 +++ linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21490 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21491 list_del(&page->lru);
21492 }
21493
21494 -#define UNSHARED_PTRS_PER_PGD \
21495 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21496 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21497 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21498
21499 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21500 +{
21501 + while (count--)
21502 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21503 +}
21504 +#endif
21505 +
21506 +#ifdef CONFIG_PAX_PER_CPU_PGD
21507 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21508 +{
21509 + while (count--)
21510 +
21511 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21512 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21513 +#else
21514 + *dst++ = *src++;
21515 +#endif
21516 +
21517 +}
21518 +#endif
21519 +
21520 +#ifdef CONFIG_X86_64
21521 +#define pxd_t pud_t
21522 +#define pyd_t pgd_t
21523 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21524 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21525 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21526 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21527 +#define PYD_SIZE PGDIR_SIZE
21528 +#else
21529 +#define pxd_t pmd_t
21530 +#define pyd_t pud_t
21531 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21532 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21533 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21534 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21535 +#define PYD_SIZE PUD_SIZE
21536 +#endif
21537 +
21538 +#ifdef CONFIG_PAX_PER_CPU_PGD
21539 +static inline void pgd_ctor(pgd_t *pgd) {}
21540 +static inline void pgd_dtor(pgd_t *pgd) {}
21541 +#else
21542 static void pgd_ctor(pgd_t *pgd)
21543 {
21544 /* If the pgd points to a shared pagetable level (either the
21545 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21546 pgd_list_del(pgd);
21547 spin_unlock_irqrestore(&pgd_lock, flags);
21548 }
21549 +#endif
21550
21551 /*
21552 * List of all pgd's needed for non-PAE so it can invalidate entries
21553 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21554 * -- wli
21555 */
21556
21557 -#ifdef CONFIG_X86_PAE
21558 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21559 /*
21560 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21561 * updating the top-level pagetable entries to guarantee the
21562 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21563 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21564 * and initialize the kernel pmds here.
21565 */
21566 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21567 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21568
21569 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21570 {
21571 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21572 */
21573 flush_tlb_mm(mm);
21574 }
21575 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21576 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21577 #else /* !CONFIG_X86_PAE */
21578
21579 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21580 -#define PREALLOCATED_PMDS 0
21581 +#define PREALLOCATED_PXDS 0
21582
21583 #endif /* CONFIG_X86_PAE */
21584
21585 -static void free_pmds(pmd_t *pmds[])
21586 +static void free_pxds(pxd_t *pxds[])
21587 {
21588 int i;
21589
21590 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21591 - if (pmds[i])
21592 - free_page((unsigned long)pmds[i]);
21593 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21594 + if (pxds[i])
21595 + free_page((unsigned long)pxds[i]);
21596 }
21597
21598 -static int preallocate_pmds(pmd_t *pmds[])
21599 +static int preallocate_pxds(pxd_t *pxds[])
21600 {
21601 int i;
21602 bool failed = false;
21603
21604 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21605 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21606 - if (pmd == NULL)
21607 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21608 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21609 + if (pxd == NULL)
21610 failed = true;
21611 - pmds[i] = pmd;
21612 + pxds[i] = pxd;
21613 }
21614
21615 if (failed) {
21616 - free_pmds(pmds);
21617 + free_pxds(pxds);
21618 return -ENOMEM;
21619 }
21620
21621 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21622 * preallocate which never got a corresponding vma will need to be
21623 * freed manually.
21624 */
21625 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21626 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21627 {
21628 int i;
21629
21630 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21631 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21632 pgd_t pgd = pgdp[i];
21633
21634 if (pgd_val(pgd) != 0) {
21635 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21636 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21637
21638 - pgdp[i] = native_make_pgd(0);
21639 + set_pgd(pgdp + i, native_make_pgd(0));
21640
21641 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21642 - pmd_free(mm, pmd);
21643 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21644 + pxd_free(mm, pxd);
21645 }
21646 }
21647 }
21648
21649 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21650 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21651 {
21652 - pud_t *pud;
21653 + pyd_t *pyd;
21654 unsigned long addr;
21655 int i;
21656
21657 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21658 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21659 return;
21660
21661 - pud = pud_offset(pgd, 0);
21662 +#ifdef CONFIG_X86_64
21663 + pyd = pyd_offset(mm, 0L);
21664 +#else
21665 + pyd = pyd_offset(pgd, 0L);
21666 +#endif
21667
21668 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21669 - i++, pud++, addr += PUD_SIZE) {
21670 - pmd_t *pmd = pmds[i];
21671 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21672 + i++, pyd++, addr += PYD_SIZE) {
21673 + pxd_t *pxd = pxds[i];
21674
21675 if (i >= KERNEL_PGD_BOUNDARY)
21676 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21677 - sizeof(pmd_t) * PTRS_PER_PMD);
21678 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21679 + sizeof(pxd_t) * PTRS_PER_PMD);
21680
21681 - pud_populate(mm, pud, pmd);
21682 + pyd_populate(mm, pyd, pxd);
21683 }
21684 }
21685
21686 pgd_t *pgd_alloc(struct mm_struct *mm)
21687 {
21688 pgd_t *pgd;
21689 - pmd_t *pmds[PREALLOCATED_PMDS];
21690 + pxd_t *pxds[PREALLOCATED_PXDS];
21691 +
21692 unsigned long flags;
21693
21694 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21695 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21696
21697 mm->pgd = pgd;
21698
21699 - if (preallocate_pmds(pmds) != 0)
21700 + if (preallocate_pxds(pxds) != 0)
21701 goto out_free_pgd;
21702
21703 if (paravirt_pgd_alloc(mm) != 0)
21704 - goto out_free_pmds;
21705 + goto out_free_pxds;
21706
21707 /*
21708 * Make sure that pre-populating the pmds is atomic with
21709 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21710 spin_lock_irqsave(&pgd_lock, flags);
21711
21712 pgd_ctor(pgd);
21713 - pgd_prepopulate_pmd(mm, pgd, pmds);
21714 + pgd_prepopulate_pxd(mm, pgd, pxds);
21715
21716 spin_unlock_irqrestore(&pgd_lock, flags);
21717
21718 return pgd;
21719
21720 -out_free_pmds:
21721 - free_pmds(pmds);
21722 +out_free_pxds:
21723 + free_pxds(pxds);
21724 out_free_pgd:
21725 free_page((unsigned long)pgd);
21726 out:
21727 @@ -287,7 +338,7 @@ out:
21728
21729 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21730 {
21731 - pgd_mop_up_pmds(mm, pgd);
21732 + pgd_mop_up_pxds(mm, pgd);
21733 pgd_dtor(pgd);
21734 paravirt_pgd_free(mm, pgd);
21735 free_page((unsigned long)pgd);
21736 diff -urNp linux-2.6.32.41/arch/x86/mm/setup_nx.c linux-2.6.32.41/arch/x86/mm/setup_nx.c
21737 --- linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21738 +++ linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21739 @@ -4,11 +4,10 @@
21740
21741 #include <asm/pgtable.h>
21742
21743 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21744 int nx_enabled;
21745
21746 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21747 -static int disable_nx __cpuinitdata;
21748 -
21749 +#ifndef CONFIG_PAX_PAGEEXEC
21750 /*
21751 * noexec = on|off
21752 *
21753 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21754 if (!str)
21755 return -EINVAL;
21756 if (!strncmp(str, "on", 2)) {
21757 - __supported_pte_mask |= _PAGE_NX;
21758 - disable_nx = 0;
21759 + nx_enabled = 1;
21760 } else if (!strncmp(str, "off", 3)) {
21761 - disable_nx = 1;
21762 - __supported_pte_mask &= ~_PAGE_NX;
21763 + nx_enabled = 0;
21764 }
21765 return 0;
21766 }
21767 early_param("noexec", noexec_setup);
21768 #endif
21769 +#endif
21770
21771 #ifdef CONFIG_X86_PAE
21772 void __init set_nx(void)
21773 {
21774 - unsigned int v[4], l, h;
21775 + if (!nx_enabled && cpu_has_nx) {
21776 + unsigned l, h;
21777
21778 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21779 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21780 -
21781 - if ((v[3] & (1 << 20)) && !disable_nx) {
21782 - rdmsr(MSR_EFER, l, h);
21783 - l |= EFER_NX;
21784 - wrmsr(MSR_EFER, l, h);
21785 - nx_enabled = 1;
21786 - __supported_pte_mask |= _PAGE_NX;
21787 - }
21788 + __supported_pte_mask &= ~_PAGE_NX;
21789 + rdmsr(MSR_EFER, l, h);
21790 + l &= ~EFER_NX;
21791 + wrmsr(MSR_EFER, l, h);
21792 }
21793 }
21794 #else
21795 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21796 unsigned long efer;
21797
21798 rdmsrl(MSR_EFER, efer);
21799 - if (!(efer & EFER_NX) || disable_nx)
21800 + if (!(efer & EFER_NX) || !nx_enabled)
21801 __supported_pte_mask &= ~_PAGE_NX;
21802 }
21803 #endif
21804 diff -urNp linux-2.6.32.41/arch/x86/mm/tlb.c linux-2.6.32.41/arch/x86/mm/tlb.c
21805 --- linux-2.6.32.41/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21806 +++ linux-2.6.32.41/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21807 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
21808 BUG();
21809 cpumask_clear_cpu(cpu,
21810 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21811 +
21812 +#ifndef CONFIG_PAX_PER_CPU_PGD
21813 load_cr3(swapper_pg_dir);
21814 +#endif
21815 +
21816 }
21817 EXPORT_SYMBOL_GPL(leave_mm);
21818
21819 diff -urNp linux-2.6.32.41/arch/x86/oprofile/backtrace.c linux-2.6.32.41/arch/x86/oprofile/backtrace.c
21820 --- linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21821 +++ linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21822 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21823 struct frame_head bufhead[2];
21824
21825 /* Also check accessibility of one struct frame_head beyond */
21826 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21827 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21828 return NULL;
21829 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21830 return NULL;
21831 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21832 {
21833 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21834
21835 - if (!user_mode_vm(regs)) {
21836 + if (!user_mode(regs)) {
21837 unsigned long stack = kernel_stack_pointer(regs);
21838 if (depth)
21839 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21840 diff -urNp linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c
21841 --- linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21842 +++ linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21843 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21844 #endif
21845 }
21846
21847 -static int inline addr_increment(void)
21848 +static inline int addr_increment(void)
21849 {
21850 #ifdef CONFIG_SMP
21851 return smp_num_siblings == 2 ? 2 : 1;
21852 diff -urNp linux-2.6.32.41/arch/x86/pci/common.c linux-2.6.32.41/arch/x86/pci/common.c
21853 --- linux-2.6.32.41/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21854 +++ linux-2.6.32.41/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21855 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
21856 int pcibios_last_bus = -1;
21857 unsigned long pirq_table_addr;
21858 struct pci_bus *pci_root_bus;
21859 -struct pci_raw_ops *raw_pci_ops;
21860 -struct pci_raw_ops *raw_pci_ext_ops;
21861 +const struct pci_raw_ops *raw_pci_ops;
21862 +const struct pci_raw_ops *raw_pci_ext_ops;
21863
21864 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
21865 int reg, int len, u32 *val)
21866 diff -urNp linux-2.6.32.41/arch/x86/pci/direct.c linux-2.6.32.41/arch/x86/pci/direct.c
21867 --- linux-2.6.32.41/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
21868 +++ linux-2.6.32.41/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
21869 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
21870
21871 #undef PCI_CONF1_ADDRESS
21872
21873 -struct pci_raw_ops pci_direct_conf1 = {
21874 +const struct pci_raw_ops pci_direct_conf1 = {
21875 .read = pci_conf1_read,
21876 .write = pci_conf1_write,
21877 };
21878 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
21879
21880 #undef PCI_CONF2_ADDRESS
21881
21882 -struct pci_raw_ops pci_direct_conf2 = {
21883 +const struct pci_raw_ops pci_direct_conf2 = {
21884 .read = pci_conf2_read,
21885 .write = pci_conf2_write,
21886 };
21887 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
21888 * This should be close to trivial, but it isn't, because there are buggy
21889 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
21890 */
21891 -static int __init pci_sanity_check(struct pci_raw_ops *o)
21892 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
21893 {
21894 u32 x = 0;
21895 int year, devfn;
21896 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_32.c linux-2.6.32.41/arch/x86/pci/mmconfig_32.c
21897 --- linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
21898 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
21899 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
21900 return 0;
21901 }
21902
21903 -static struct pci_raw_ops pci_mmcfg = {
21904 +static const struct pci_raw_ops pci_mmcfg = {
21905 .read = pci_mmcfg_read,
21906 .write = pci_mmcfg_write,
21907 };
21908 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_64.c linux-2.6.32.41/arch/x86/pci/mmconfig_64.c
21909 --- linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
21910 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
21911 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
21912 return 0;
21913 }
21914
21915 -static struct pci_raw_ops pci_mmcfg = {
21916 +static const struct pci_raw_ops pci_mmcfg = {
21917 .read = pci_mmcfg_read,
21918 .write = pci_mmcfg_write,
21919 };
21920 diff -urNp linux-2.6.32.41/arch/x86/pci/numaq_32.c linux-2.6.32.41/arch/x86/pci/numaq_32.c
21921 --- linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
21922 +++ linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
21923 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
21924
21925 #undef PCI_CONF1_MQ_ADDRESS
21926
21927 -static struct pci_raw_ops pci_direct_conf1_mq = {
21928 +static const struct pci_raw_ops pci_direct_conf1_mq = {
21929 .read = pci_conf1_mq_read,
21930 .write = pci_conf1_mq_write
21931 };
21932 diff -urNp linux-2.6.32.41/arch/x86/pci/olpc.c linux-2.6.32.41/arch/x86/pci/olpc.c
21933 --- linux-2.6.32.41/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
21934 +++ linux-2.6.32.41/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
21935 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
21936 return 0;
21937 }
21938
21939 -static struct pci_raw_ops pci_olpc_conf = {
21940 +static const struct pci_raw_ops pci_olpc_conf = {
21941 .read = pci_olpc_read,
21942 .write = pci_olpc_write,
21943 };
21944 diff -urNp linux-2.6.32.41/arch/x86/pci/pcbios.c linux-2.6.32.41/arch/x86/pci/pcbios.c
21945 --- linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
21946 +++ linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
21947 @@ -56,50 +56,93 @@ union bios32 {
21948 static struct {
21949 unsigned long address;
21950 unsigned short segment;
21951 -} bios32_indirect = { 0, __KERNEL_CS };
21952 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21953
21954 /*
21955 * Returns the entry point for the given service, NULL on error
21956 */
21957
21958 -static unsigned long bios32_service(unsigned long service)
21959 +static unsigned long __devinit bios32_service(unsigned long service)
21960 {
21961 unsigned char return_code; /* %al */
21962 unsigned long address; /* %ebx */
21963 unsigned long length; /* %ecx */
21964 unsigned long entry; /* %edx */
21965 unsigned long flags;
21966 + struct desc_struct d, *gdt;
21967
21968 local_irq_save(flags);
21969 - __asm__("lcall *(%%edi); cld"
21970 +
21971 + gdt = get_cpu_gdt_table(smp_processor_id());
21972 +
21973 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21974 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21975 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21976 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21977 +
21978 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21979 : "=a" (return_code),
21980 "=b" (address),
21981 "=c" (length),
21982 "=d" (entry)
21983 : "0" (service),
21984 "1" (0),
21985 - "D" (&bios32_indirect));
21986 + "D" (&bios32_indirect),
21987 + "r"(__PCIBIOS_DS)
21988 + : "memory");
21989 +
21990 + pax_open_kernel();
21991 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21992 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21993 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21994 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21995 + pax_close_kernel();
21996 +
21997 local_irq_restore(flags);
21998
21999 switch (return_code) {
22000 - case 0:
22001 - return address + entry;
22002 - case 0x80: /* Not present */
22003 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22004 - return 0;
22005 - default: /* Shouldn't happen */
22006 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22007 - service, return_code);
22008 + case 0: {
22009 + int cpu;
22010 + unsigned char flags;
22011 +
22012 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22013 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22014 + printk(KERN_WARNING "bios32_service: not valid\n");
22015 return 0;
22016 + }
22017 + address = address + PAGE_OFFSET;
22018 + length += 16UL; /* some BIOSs underreport this... */
22019 + flags = 4;
22020 + if (length >= 64*1024*1024) {
22021 + length >>= PAGE_SHIFT;
22022 + flags |= 8;
22023 + }
22024 +
22025 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22026 + gdt = get_cpu_gdt_table(cpu);
22027 + pack_descriptor(&d, address, length, 0x9b, flags);
22028 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22029 + pack_descriptor(&d, address, length, 0x93, flags);
22030 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22031 + }
22032 + return entry;
22033 + }
22034 + case 0x80: /* Not present */
22035 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22036 + return 0;
22037 + default: /* Shouldn't happen */
22038 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22039 + service, return_code);
22040 + return 0;
22041 }
22042 }
22043
22044 static struct {
22045 unsigned long address;
22046 unsigned short segment;
22047 -} pci_indirect = { 0, __KERNEL_CS };
22048 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22049
22050 -static int pci_bios_present;
22051 +static int pci_bios_present __read_only;
22052
22053 static int __devinit check_pcibios(void)
22054 {
22055 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22056 unsigned long flags, pcibios_entry;
22057
22058 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22059 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22060 + pci_indirect.address = pcibios_entry;
22061
22062 local_irq_save(flags);
22063 - __asm__(
22064 - "lcall *(%%edi); cld\n\t"
22065 + __asm__("movw %w6, %%ds\n\t"
22066 + "lcall *%%ss:(%%edi); cld\n\t"
22067 + "push %%ss\n\t"
22068 + "pop %%ds\n\t"
22069 "jc 1f\n\t"
22070 "xor %%ah, %%ah\n"
22071 "1:"
22072 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22073 "=b" (ebx),
22074 "=c" (ecx)
22075 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22076 - "D" (&pci_indirect)
22077 + "D" (&pci_indirect),
22078 + "r" (__PCIBIOS_DS)
22079 : "memory");
22080 local_irq_restore(flags);
22081
22082 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22083
22084 switch (len) {
22085 case 1:
22086 - __asm__("lcall *(%%esi); cld\n\t"
22087 + __asm__("movw %w6, %%ds\n\t"
22088 + "lcall *%%ss:(%%esi); cld\n\t"
22089 + "push %%ss\n\t"
22090 + "pop %%ds\n\t"
22091 "jc 1f\n\t"
22092 "xor %%ah, %%ah\n"
22093 "1:"
22094 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22095 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22096 "b" (bx),
22097 "D" ((long)reg),
22098 - "S" (&pci_indirect));
22099 + "S" (&pci_indirect),
22100 + "r" (__PCIBIOS_DS));
22101 /*
22102 * Zero-extend the result beyond 8 bits, do not trust the
22103 * BIOS having done it:
22104 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22105 *value &= 0xff;
22106 break;
22107 case 2:
22108 - __asm__("lcall *(%%esi); cld\n\t"
22109 + __asm__("movw %w6, %%ds\n\t"
22110 + "lcall *%%ss:(%%esi); cld\n\t"
22111 + "push %%ss\n\t"
22112 + "pop %%ds\n\t"
22113 "jc 1f\n\t"
22114 "xor %%ah, %%ah\n"
22115 "1:"
22116 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22117 : "1" (PCIBIOS_READ_CONFIG_WORD),
22118 "b" (bx),
22119 "D" ((long)reg),
22120 - "S" (&pci_indirect));
22121 + "S" (&pci_indirect),
22122 + "r" (__PCIBIOS_DS));
22123 /*
22124 * Zero-extend the result beyond 16 bits, do not trust the
22125 * BIOS having done it:
22126 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22127 *value &= 0xffff;
22128 break;
22129 case 4:
22130 - __asm__("lcall *(%%esi); cld\n\t"
22131 + __asm__("movw %w6, %%ds\n\t"
22132 + "lcall *%%ss:(%%esi); cld\n\t"
22133 + "push %%ss\n\t"
22134 + "pop %%ds\n\t"
22135 "jc 1f\n\t"
22136 "xor %%ah, %%ah\n"
22137 "1:"
22138 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22139 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22140 "b" (bx),
22141 "D" ((long)reg),
22142 - "S" (&pci_indirect));
22143 + "S" (&pci_indirect),
22144 + "r" (__PCIBIOS_DS));
22145 break;
22146 }
22147
22148 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22149
22150 switch (len) {
22151 case 1:
22152 - __asm__("lcall *(%%esi); cld\n\t"
22153 + __asm__("movw %w6, %%ds\n\t"
22154 + "lcall *%%ss:(%%esi); cld\n\t"
22155 + "push %%ss\n\t"
22156 + "pop %%ds\n\t"
22157 "jc 1f\n\t"
22158 "xor %%ah, %%ah\n"
22159 "1:"
22160 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22161 "c" (value),
22162 "b" (bx),
22163 "D" ((long)reg),
22164 - "S" (&pci_indirect));
22165 + "S" (&pci_indirect),
22166 + "r" (__PCIBIOS_DS));
22167 break;
22168 case 2:
22169 - __asm__("lcall *(%%esi); cld\n\t"
22170 + __asm__("movw %w6, %%ds\n\t"
22171 + "lcall *%%ss:(%%esi); cld\n\t"
22172 + "push %%ss\n\t"
22173 + "pop %%ds\n\t"
22174 "jc 1f\n\t"
22175 "xor %%ah, %%ah\n"
22176 "1:"
22177 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22178 "c" (value),
22179 "b" (bx),
22180 "D" ((long)reg),
22181 - "S" (&pci_indirect));
22182 + "S" (&pci_indirect),
22183 + "r" (__PCIBIOS_DS));
22184 break;
22185 case 4:
22186 - __asm__("lcall *(%%esi); cld\n\t"
22187 + __asm__("movw %w6, %%ds\n\t"
22188 + "lcall *%%ss:(%%esi); cld\n\t"
22189 + "push %%ss\n\t"
22190 + "pop %%ds\n\t"
22191 "jc 1f\n\t"
22192 "xor %%ah, %%ah\n"
22193 "1:"
22194 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22195 "c" (value),
22196 "b" (bx),
22197 "D" ((long)reg),
22198 - "S" (&pci_indirect));
22199 + "S" (&pci_indirect),
22200 + "r" (__PCIBIOS_DS));
22201 break;
22202 }
22203
22204 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22205 * Function table for BIOS32 access
22206 */
22207
22208 -static struct pci_raw_ops pci_bios_access = {
22209 +static const struct pci_raw_ops pci_bios_access = {
22210 .read = pci_bios_read,
22211 .write = pci_bios_write
22212 };
22213 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22214 * Try to find PCI BIOS.
22215 */
22216
22217 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22218 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22219 {
22220 union bios32 *check;
22221 unsigned char sum;
22222 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22223
22224 DBG("PCI: Fetching IRQ routing table... ");
22225 __asm__("push %%es\n\t"
22226 + "movw %w8, %%ds\n\t"
22227 "push %%ds\n\t"
22228 "pop %%es\n\t"
22229 - "lcall *(%%esi); cld\n\t"
22230 + "lcall *%%ss:(%%esi); cld\n\t"
22231 "pop %%es\n\t"
22232 + "push %%ss\n\t"
22233 + "pop %%ds\n"
22234 "jc 1f\n\t"
22235 "xor %%ah, %%ah\n"
22236 "1:"
22237 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22238 "1" (0),
22239 "D" ((long) &opt),
22240 "S" (&pci_indirect),
22241 - "m" (opt)
22242 + "m" (opt),
22243 + "r" (__PCIBIOS_DS)
22244 : "memory");
22245 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22246 if (ret & 0xff00)
22247 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22248 {
22249 int ret;
22250
22251 - __asm__("lcall *(%%esi); cld\n\t"
22252 + __asm__("movw %w5, %%ds\n\t"
22253 + "lcall *%%ss:(%%esi); cld\n\t"
22254 + "push %%ss\n\t"
22255 + "pop %%ds\n"
22256 "jc 1f\n\t"
22257 "xor %%ah, %%ah\n"
22258 "1:"
22259 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22260 : "0" (PCIBIOS_SET_PCI_HW_INT),
22261 "b" ((dev->bus->number << 8) | dev->devfn),
22262 "c" ((irq << 8) | (pin + 10)),
22263 - "S" (&pci_indirect));
22264 + "S" (&pci_indirect),
22265 + "r" (__PCIBIOS_DS));
22266 return !(ret & 0xff00);
22267 }
22268 EXPORT_SYMBOL(pcibios_set_irq_routing);
22269 diff -urNp linux-2.6.32.41/arch/x86/power/cpu.c linux-2.6.32.41/arch/x86/power/cpu.c
22270 --- linux-2.6.32.41/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22271 +++ linux-2.6.32.41/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22272 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22273 static void fix_processor_context(void)
22274 {
22275 int cpu = smp_processor_id();
22276 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22277 + struct tss_struct *t = init_tss + cpu;
22278
22279 set_tss_desc(cpu, t); /*
22280 * This just modifies memory; should not be
22281 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22282 */
22283
22284 #ifdef CONFIG_X86_64
22285 + pax_open_kernel();
22286 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22287 + pax_close_kernel();
22288
22289 syscall_init(); /* This sets MSR_*STAR and related */
22290 #endif
22291 diff -urNp linux-2.6.32.41/arch/x86/vdso/Makefile linux-2.6.32.41/arch/x86/vdso/Makefile
22292 --- linux-2.6.32.41/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22293 +++ linux-2.6.32.41/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22294 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22295 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22296 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22297
22298 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22299 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22300 GCOV_PROFILE := n
22301
22302 #
22303 diff -urNp linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c
22304 --- linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22305 +++ linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22306 @@ -22,24 +22,48 @@
22307 #include <asm/hpet.h>
22308 #include <asm/unistd.h>
22309 #include <asm/io.h>
22310 +#include <asm/fixmap.h>
22311 #include "vextern.h"
22312
22313 #define gtod vdso_vsyscall_gtod_data
22314
22315 +notrace noinline long __vdso_fallback_time(long *t)
22316 +{
22317 + long secs;
22318 + asm volatile("syscall"
22319 + : "=a" (secs)
22320 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22321 + return secs;
22322 +}
22323 +
22324 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22325 {
22326 long ret;
22327 asm("syscall" : "=a" (ret) :
22328 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22329 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22330 return ret;
22331 }
22332
22333 +notrace static inline cycle_t __vdso_vread_hpet(void)
22334 +{
22335 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22336 +}
22337 +
22338 +notrace static inline cycle_t __vdso_vread_tsc(void)
22339 +{
22340 + cycle_t ret = (cycle_t)vget_cycles();
22341 +
22342 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22343 +}
22344 +
22345 notrace static inline long vgetns(void)
22346 {
22347 long v;
22348 - cycles_t (*vread)(void);
22349 - vread = gtod->clock.vread;
22350 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22351 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22352 + v = __vdso_vread_tsc();
22353 + else
22354 + v = __vdso_vread_hpet();
22355 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22356 return (v * gtod->clock.mult) >> gtod->clock.shift;
22357 }
22358
22359 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22360
22361 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22362 {
22363 - if (likely(gtod->sysctl_enabled))
22364 + if (likely(gtod->sysctl_enabled &&
22365 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22366 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22367 switch (clock) {
22368 case CLOCK_REALTIME:
22369 if (likely(gtod->clock.vread))
22370 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22371 int clock_gettime(clockid_t, struct timespec *)
22372 __attribute__((weak, alias("__vdso_clock_gettime")));
22373
22374 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22375 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22376 {
22377 long ret;
22378 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22379 + asm("syscall" : "=a" (ret) :
22380 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22381 + return ret;
22382 +}
22383 +
22384 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22385 +{
22386 + if (likely(gtod->sysctl_enabled &&
22387 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22388 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22389 + {
22390 if (likely(tv != NULL)) {
22391 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22392 offsetof(struct timespec, tv_nsec) ||
22393 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22394 }
22395 return 0;
22396 }
22397 - asm("syscall" : "=a" (ret) :
22398 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22399 - return ret;
22400 + return __vdso_fallback_gettimeofday(tv, tz);
22401 }
22402 int gettimeofday(struct timeval *, struct timezone *)
22403 __attribute__((weak, alias("__vdso_gettimeofday")));
22404 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c
22405 --- linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22406 +++ linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22407 @@ -25,6 +25,7 @@
22408 #include <asm/tlbflush.h>
22409 #include <asm/vdso.h>
22410 #include <asm/proto.h>
22411 +#include <asm/mman.h>
22412
22413 enum {
22414 VDSO_DISABLED = 0,
22415 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22416 void enable_sep_cpu(void)
22417 {
22418 int cpu = get_cpu();
22419 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22420 + struct tss_struct *tss = init_tss + cpu;
22421
22422 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22423 put_cpu();
22424 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22425 gate_vma.vm_start = FIXADDR_USER_START;
22426 gate_vma.vm_end = FIXADDR_USER_END;
22427 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22428 - gate_vma.vm_page_prot = __P101;
22429 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22430 /*
22431 * Make sure the vDSO gets into every core dump.
22432 * Dumping its contents makes post-mortem fully interpretable later
22433 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22434 if (compat)
22435 addr = VDSO_HIGH_BASE;
22436 else {
22437 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22438 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22439 if (IS_ERR_VALUE(addr)) {
22440 ret = addr;
22441 goto up_fail;
22442 }
22443 }
22444
22445 - current->mm->context.vdso = (void *)addr;
22446 + current->mm->context.vdso = addr;
22447
22448 if (compat_uses_vma || !compat) {
22449 /*
22450 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22451 }
22452
22453 current_thread_info()->sysenter_return =
22454 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22455 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22456
22457 up_fail:
22458 if (ret)
22459 - current->mm->context.vdso = NULL;
22460 + current->mm->context.vdso = 0;
22461
22462 up_write(&mm->mmap_sem);
22463
22464 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22465
22466 const char *arch_vma_name(struct vm_area_struct *vma)
22467 {
22468 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22469 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22470 return "[vdso]";
22471 +
22472 +#ifdef CONFIG_PAX_SEGMEXEC
22473 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22474 + return "[vdso]";
22475 +#endif
22476 +
22477 return NULL;
22478 }
22479
22480 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22481 struct mm_struct *mm = tsk->mm;
22482
22483 /* Check to see if this task was created in compat vdso mode */
22484 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22485 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22486 return &gate_vma;
22487 return NULL;
22488 }
22489 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso.lds.S linux-2.6.32.41/arch/x86/vdso/vdso.lds.S
22490 --- linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22491 +++ linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-06-04 20:37:24.000000000 -0400
22492 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22493 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22494 #include "vextern.h"
22495 #undef VEXTERN
22496 +
22497 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22498 +VEXTERN(gettimeofday)
22499 +VEXTERN(clock_gettime)
22500 +VEXTERN(getcpu)
22501 +#undef VEXTERN
22502 diff -urNp linux-2.6.32.41/arch/x86/vdso/vextern.h linux-2.6.32.41/arch/x86/vdso/vextern.h
22503 --- linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22504 +++ linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22505 @@ -11,6 +11,5 @@
22506 put into vextern.h and be referenced as a pointer with vdso prefix.
22507 The main kernel later fills in the values. */
22508
22509 -VEXTERN(jiffies)
22510 VEXTERN(vgetcpu_mode)
22511 VEXTERN(vsyscall_gtod_data)
22512 diff -urNp linux-2.6.32.41/arch/x86/vdso/vma.c linux-2.6.32.41/arch/x86/vdso/vma.c
22513 --- linux-2.6.32.41/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22514 +++ linux-2.6.32.41/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22515 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22516 if (!vbase)
22517 goto oom;
22518
22519 - if (memcmp(vbase, "\177ELF", 4)) {
22520 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22521 printk("VDSO: I'm broken; not ELF\n");
22522 vdso_enabled = 0;
22523 }
22524 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22525 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22526 #include "vextern.h"
22527 #undef VEXTERN
22528 + vunmap(vbase);
22529 return 0;
22530
22531 oom:
22532 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22533 goto up_fail;
22534 }
22535
22536 - current->mm->context.vdso = (void *)addr;
22537 + current->mm->context.vdso = addr;
22538
22539 ret = install_special_mapping(mm, addr, vdso_size,
22540 VM_READ|VM_EXEC|
22541 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22542 VM_ALWAYSDUMP,
22543 vdso_pages);
22544 if (ret) {
22545 - current->mm->context.vdso = NULL;
22546 + current->mm->context.vdso = 0;
22547 goto up_fail;
22548 }
22549
22550 @@ -132,10 +133,3 @@ up_fail:
22551 up_write(&mm->mmap_sem);
22552 return ret;
22553 }
22554 -
22555 -static __init int vdso_setup(char *s)
22556 -{
22557 - vdso_enabled = simple_strtoul(s, NULL, 0);
22558 - return 0;
22559 -}
22560 -__setup("vdso=", vdso_setup);
22561 diff -urNp linux-2.6.32.41/arch/x86/xen/enlighten.c linux-2.6.32.41/arch/x86/xen/enlighten.c
22562 --- linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22563 +++ linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22564 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22565
22566 struct shared_info xen_dummy_shared_info;
22567
22568 -void *xen_initial_gdt;
22569 -
22570 /*
22571 * Point at some empty memory to start with. We map the real shared_info
22572 * page as soon as fixmap is up and running.
22573 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22574
22575 preempt_disable();
22576
22577 - start = __get_cpu_var(idt_desc).address;
22578 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22579 end = start + __get_cpu_var(idt_desc).size + 1;
22580
22581 xen_mc_flush();
22582 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22583 #endif
22584 };
22585
22586 -static void xen_reboot(int reason)
22587 +static __noreturn void xen_reboot(int reason)
22588 {
22589 struct sched_shutdown r = { .reason = reason };
22590
22591 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22592 BUG();
22593 }
22594
22595 -static void xen_restart(char *msg)
22596 +static __noreturn void xen_restart(char *msg)
22597 {
22598 xen_reboot(SHUTDOWN_reboot);
22599 }
22600
22601 -static void xen_emergency_restart(void)
22602 +static __noreturn void xen_emergency_restart(void)
22603 {
22604 xen_reboot(SHUTDOWN_reboot);
22605 }
22606
22607 -static void xen_machine_halt(void)
22608 +static __noreturn void xen_machine_halt(void)
22609 {
22610 xen_reboot(SHUTDOWN_poweroff);
22611 }
22612 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22613 */
22614 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22615
22616 -#ifdef CONFIG_X86_64
22617 /* Work out if we support NX */
22618 - check_efer();
22619 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22620 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22621 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22622 + unsigned l, h;
22623 +
22624 +#ifdef CONFIG_X86_PAE
22625 + nx_enabled = 1;
22626 +#endif
22627 + __supported_pte_mask |= _PAGE_NX;
22628 + rdmsr(MSR_EFER, l, h);
22629 + l |= EFER_NX;
22630 + wrmsr(MSR_EFER, l, h);
22631 + }
22632 #endif
22633
22634 xen_setup_features();
22635 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22636
22637 machine_ops = xen_machine_ops;
22638
22639 - /*
22640 - * The only reliable way to retain the initial address of the
22641 - * percpu gdt_page is to remember it here, so we can go and
22642 - * mark it RW later, when the initial percpu area is freed.
22643 - */
22644 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22645 -
22646 xen_smp_init();
22647
22648 pgd = (pgd_t *)xen_start_info->pt_base;
22649 diff -urNp linux-2.6.32.41/arch/x86/xen/mmu.c linux-2.6.32.41/arch/x86/xen/mmu.c
22650 --- linux-2.6.32.41/arch/x86/xen/mmu.c 2011-03-27 14:31:47.000000000 -0400
22651 +++ linux-2.6.32.41/arch/x86/xen/mmu.c 2011-04-17 15:56:46.000000000 -0400
22652 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22653 convert_pfn_mfn(init_level4_pgt);
22654 convert_pfn_mfn(level3_ident_pgt);
22655 convert_pfn_mfn(level3_kernel_pgt);
22656 + convert_pfn_mfn(level3_vmalloc_pgt);
22657 + convert_pfn_mfn(level3_vmemmap_pgt);
22658
22659 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22660 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22661 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22662 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22663 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22664 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22665 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22666 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22667 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22668 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22669 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22670 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22671
22672 diff -urNp linux-2.6.32.41/arch/x86/xen/smp.c linux-2.6.32.41/arch/x86/xen/smp.c
22673 --- linux-2.6.32.41/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22674 +++ linux-2.6.32.41/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22675 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22676 {
22677 BUG_ON(smp_processor_id() != 0);
22678 native_smp_prepare_boot_cpu();
22679 -
22680 - /* We've switched to the "real" per-cpu gdt, so make sure the
22681 - old memory can be recycled */
22682 - make_lowmem_page_readwrite(xen_initial_gdt);
22683 -
22684 xen_setup_vcpu_info_placement();
22685 }
22686
22687 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22688 gdt = get_cpu_gdt_table(cpu);
22689
22690 ctxt->flags = VGCF_IN_KERNEL;
22691 - ctxt->user_regs.ds = __USER_DS;
22692 - ctxt->user_regs.es = __USER_DS;
22693 + ctxt->user_regs.ds = __KERNEL_DS;
22694 + ctxt->user_regs.es = __KERNEL_DS;
22695 ctxt->user_regs.ss = __KERNEL_DS;
22696 #ifdef CONFIG_X86_32
22697 ctxt->user_regs.fs = __KERNEL_PERCPU;
22698 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22699 + savesegment(gs, ctxt->user_regs.gs);
22700 #else
22701 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22702 #endif
22703 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22704 int rc;
22705
22706 per_cpu(current_task, cpu) = idle;
22707 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22708 #ifdef CONFIG_X86_32
22709 irq_ctx_init(cpu);
22710 #else
22711 clear_tsk_thread_flag(idle, TIF_FORK);
22712 - per_cpu(kernel_stack, cpu) =
22713 - (unsigned long)task_stack_page(idle) -
22714 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22715 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22716 #endif
22717 xen_setup_runstate_info(cpu);
22718 xen_setup_timer(cpu);
22719 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-asm_32.S linux-2.6.32.41/arch/x86/xen/xen-asm_32.S
22720 --- linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22721 +++ linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22722 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22723 ESP_OFFSET=4 # bytes pushed onto stack
22724
22725 /*
22726 - * Store vcpu_info pointer for easy access. Do it this way to
22727 - * avoid having to reload %fs
22728 + * Store vcpu_info pointer for easy access.
22729 */
22730 #ifdef CONFIG_SMP
22731 - GET_THREAD_INFO(%eax)
22732 - movl TI_cpu(%eax), %eax
22733 - movl __per_cpu_offset(,%eax,4), %eax
22734 - mov per_cpu__xen_vcpu(%eax), %eax
22735 + push %fs
22736 + mov $(__KERNEL_PERCPU), %eax
22737 + mov %eax, %fs
22738 + mov PER_CPU_VAR(xen_vcpu), %eax
22739 + pop %fs
22740 #else
22741 movl per_cpu__xen_vcpu, %eax
22742 #endif
22743 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-head.S linux-2.6.32.41/arch/x86/xen/xen-head.S
22744 --- linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22745 +++ linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22746 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22747 #ifdef CONFIG_X86_32
22748 mov %esi,xen_start_info
22749 mov $init_thread_union+THREAD_SIZE,%esp
22750 +#ifdef CONFIG_SMP
22751 + movl $cpu_gdt_table,%edi
22752 + movl $__per_cpu_load,%eax
22753 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22754 + rorl $16,%eax
22755 + movb %al,__KERNEL_PERCPU + 4(%edi)
22756 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22757 + movl $__per_cpu_end - 1,%eax
22758 + subl $__per_cpu_start,%eax
22759 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22760 +#endif
22761 #else
22762 mov %rsi,xen_start_info
22763 mov $init_thread_union+THREAD_SIZE,%rsp
22764 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-ops.h linux-2.6.32.41/arch/x86/xen/xen-ops.h
22765 --- linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22766 +++ linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22767 @@ -10,8 +10,6 @@
22768 extern const char xen_hypervisor_callback[];
22769 extern const char xen_failsafe_callback[];
22770
22771 -extern void *xen_initial_gdt;
22772 -
22773 struct trap_info;
22774 void xen_copy_trap_info(struct trap_info *traps);
22775
22776 diff -urNp linux-2.6.32.41/block/blk-integrity.c linux-2.6.32.41/block/blk-integrity.c
22777 --- linux-2.6.32.41/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22778 +++ linux-2.6.32.41/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22779 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22780 NULL,
22781 };
22782
22783 -static struct sysfs_ops integrity_ops = {
22784 +static const struct sysfs_ops integrity_ops = {
22785 .show = &integrity_attr_show,
22786 .store = &integrity_attr_store,
22787 };
22788 diff -urNp linux-2.6.32.41/block/blk-iopoll.c linux-2.6.32.41/block/blk-iopoll.c
22789 --- linux-2.6.32.41/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22790 +++ linux-2.6.32.41/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22791 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22792 }
22793 EXPORT_SYMBOL(blk_iopoll_complete);
22794
22795 -static void blk_iopoll_softirq(struct softirq_action *h)
22796 +static void blk_iopoll_softirq(void)
22797 {
22798 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22799 int rearm = 0, budget = blk_iopoll_budget;
22800 diff -urNp linux-2.6.32.41/block/blk-map.c linux-2.6.32.41/block/blk-map.c
22801 --- linux-2.6.32.41/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22802 +++ linux-2.6.32.41/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22803 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22804 * direct dma. else, set up kernel bounce buffers
22805 */
22806 uaddr = (unsigned long) ubuf;
22807 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
22808 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22809 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22810 else
22811 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22812 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22813 for (i = 0; i < iov_count; i++) {
22814 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22815
22816 + if (!iov[i].iov_len)
22817 + return -EINVAL;
22818 +
22819 if (uaddr & queue_dma_alignment(q)) {
22820 unaligned = 1;
22821 break;
22822 }
22823 - if (!iov[i].iov_len)
22824 - return -EINVAL;
22825 }
22826
22827 if (unaligned || (q->dma_pad_mask & len) || map_data)
22828 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22829 if (!len || !kbuf)
22830 return -EINVAL;
22831
22832 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22833 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22834 if (do_copy)
22835 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22836 else
22837 diff -urNp linux-2.6.32.41/block/blk-softirq.c linux-2.6.32.41/block/blk-softirq.c
22838 --- linux-2.6.32.41/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22839 +++ linux-2.6.32.41/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22840 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22841 * Softirq action handler - move entries to local list and loop over them
22842 * while passing them to the queue registered handler.
22843 */
22844 -static void blk_done_softirq(struct softirq_action *h)
22845 +static void blk_done_softirq(void)
22846 {
22847 struct list_head *cpu_list, local_list;
22848
22849 diff -urNp linux-2.6.32.41/block/blk-sysfs.c linux-2.6.32.41/block/blk-sysfs.c
22850 --- linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22851 +++ linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22852 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22853 kmem_cache_free(blk_requestq_cachep, q);
22854 }
22855
22856 -static struct sysfs_ops queue_sysfs_ops = {
22857 +static const struct sysfs_ops queue_sysfs_ops = {
22858 .show = queue_attr_show,
22859 .store = queue_attr_store,
22860 };
22861 diff -urNp linux-2.6.32.41/block/bsg.c linux-2.6.32.41/block/bsg.c
22862 --- linux-2.6.32.41/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
22863 +++ linux-2.6.32.41/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
22864 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22865 struct sg_io_v4 *hdr, struct bsg_device *bd,
22866 fmode_t has_write_perm)
22867 {
22868 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22869 + unsigned char *cmdptr;
22870 +
22871 if (hdr->request_len > BLK_MAX_CDB) {
22872 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22873 if (!rq->cmd)
22874 return -ENOMEM;
22875 - }
22876 + cmdptr = rq->cmd;
22877 + } else
22878 + cmdptr = tmpcmd;
22879
22880 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22881 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22882 hdr->request_len))
22883 return -EFAULT;
22884
22885 + if (cmdptr != rq->cmd)
22886 + memcpy(rq->cmd, cmdptr, hdr->request_len);
22887 +
22888 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22889 if (blk_verify_command(rq->cmd, has_write_perm))
22890 return -EPERM;
22891 diff -urNp linux-2.6.32.41/block/elevator.c linux-2.6.32.41/block/elevator.c
22892 --- linux-2.6.32.41/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
22893 +++ linux-2.6.32.41/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
22894 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
22895 return error;
22896 }
22897
22898 -static struct sysfs_ops elv_sysfs_ops = {
22899 +static const struct sysfs_ops elv_sysfs_ops = {
22900 .show = elv_attr_show,
22901 .store = elv_attr_store,
22902 };
22903 diff -urNp linux-2.6.32.41/block/scsi_ioctl.c linux-2.6.32.41/block/scsi_ioctl.c
22904 --- linux-2.6.32.41/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
22905 +++ linux-2.6.32.41/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
22906 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
22907 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22908 struct sg_io_hdr *hdr, fmode_t mode)
22909 {
22910 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22911 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22912 + unsigned char *cmdptr;
22913 +
22914 + if (rq->cmd != rq->__cmd)
22915 + cmdptr = rq->cmd;
22916 + else
22917 + cmdptr = tmpcmd;
22918 +
22919 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22920 return -EFAULT;
22921 +
22922 + if (cmdptr != rq->cmd)
22923 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22924 +
22925 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22926 return -EPERM;
22927
22928 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
22929 int err;
22930 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22931 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22932 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22933 + unsigned char *cmdptr;
22934
22935 if (!sic)
22936 return -EINVAL;
22937 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
22938 */
22939 err = -EFAULT;
22940 rq->cmd_len = cmdlen;
22941 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
22942 +
22943 + if (rq->cmd != rq->__cmd)
22944 + cmdptr = rq->cmd;
22945 + else
22946 + cmdptr = tmpcmd;
22947 +
22948 + if (copy_from_user(cmdptr, sic->data, cmdlen))
22949 goto error;
22950
22951 + if (rq->cmd != cmdptr)
22952 + memcpy(rq->cmd, cmdptr, cmdlen);
22953 +
22954 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22955 goto error;
22956
22957 diff -urNp linux-2.6.32.41/crypto/serpent.c linux-2.6.32.41/crypto/serpent.c
22958 --- linux-2.6.32.41/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
22959 +++ linux-2.6.32.41/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
22960 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22961 u32 r0,r1,r2,r3,r4;
22962 int i;
22963
22964 + pax_track_stack();
22965 +
22966 /* Copy key, add padding */
22967
22968 for (i = 0; i < keylen; ++i)
22969 diff -urNp linux-2.6.32.41/Documentation/dontdiff linux-2.6.32.41/Documentation/dontdiff
22970 --- linux-2.6.32.41/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
22971 +++ linux-2.6.32.41/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
22972 @@ -1,13 +1,16 @@
22973 *.a
22974 *.aux
22975 *.bin
22976 +*.cis
22977 *.cpio
22978 *.csp
22979 +*.dbg
22980 *.dsp
22981 *.dvi
22982 *.elf
22983 *.eps
22984 *.fw
22985 +*.gcno
22986 *.gen.S
22987 *.gif
22988 *.grep
22989 @@ -38,8 +41,10 @@
22990 *.tab.h
22991 *.tex
22992 *.ver
22993 +*.vim
22994 *.xml
22995 *_MODULES
22996 +*_reg_safe.h
22997 *_vga16.c
22998 *~
22999 *.9
23000 @@ -49,11 +54,16 @@
23001 53c700_d.h
23002 CVS
23003 ChangeSet
23004 +GPATH
23005 +GRTAGS
23006 +GSYMS
23007 +GTAGS
23008 Image
23009 Kerntypes
23010 Module.markers
23011 Module.symvers
23012 PENDING
23013 +PERF*
23014 SCCS
23015 System.map*
23016 TAGS
23017 @@ -76,7 +86,11 @@ btfixupprep
23018 build
23019 bvmlinux
23020 bzImage*
23021 +capability_names.h
23022 +capflags.c
23023 classlist.h*
23024 +clut_vga16.c
23025 +common-cmds.h
23026 comp*.log
23027 compile.h*
23028 conf
23029 @@ -103,13 +117,14 @@ gen_crc32table
23030 gen_init_cpio
23031 genksyms
23032 *_gray256.c
23033 +hash
23034 ihex2fw
23035 ikconfig.h*
23036 initramfs_data.cpio
23037 +initramfs_data.cpio.bz2
23038 initramfs_data.cpio.gz
23039 initramfs_list
23040 kallsyms
23041 -kconfig
23042 keywords.c
23043 ksym.c*
23044 ksym.h*
23045 @@ -133,7 +148,9 @@ mkboot
23046 mkbugboot
23047 mkcpustr
23048 mkdep
23049 +mkpiggy
23050 mkprep
23051 +mkregtable
23052 mktables
23053 mktree
23054 modpost
23055 @@ -149,6 +166,7 @@ patches*
23056 pca200e.bin
23057 pca200e_ecd.bin2
23058 piggy.gz
23059 +piggy.S
23060 piggyback
23061 pnmtologo
23062 ppc_defs.h*
23063 @@ -157,12 +175,15 @@ qconf
23064 raid6altivec*.c
23065 raid6int*.c
23066 raid6tables.c
23067 +regdb.c
23068 relocs
23069 +rlim_names.h
23070 series
23071 setup
23072 setup.bin
23073 setup.elf
23074 sImage
23075 +slabinfo
23076 sm_tbl*
23077 split-include
23078 syscalltab.h
23079 @@ -186,14 +207,20 @@ version.h*
23080 vmlinux
23081 vmlinux-*
23082 vmlinux.aout
23083 +vmlinux.bin.all
23084 +vmlinux.bin.bz2
23085 vmlinux.lds
23086 +vmlinux.relocs
23087 +voffset.h
23088 vsyscall.lds
23089 vsyscall_32.lds
23090 wanxlfw.inc
23091 uImage
23092 unifdef
23093 +utsrelease.h
23094 wakeup.bin
23095 wakeup.elf
23096 wakeup.lds
23097 zImage*
23098 zconf.hash.c
23099 +zoffset.h
23100 diff -urNp linux-2.6.32.41/Documentation/kernel-parameters.txt linux-2.6.32.41/Documentation/kernel-parameters.txt
23101 --- linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23102 +++ linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23103 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23104 the specified number of seconds. This is to be used if
23105 your oopses keep scrolling off the screen.
23106
23107 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23108 + virtualization environments that don't cope well with the
23109 + expand down segment used by UDEREF on X86-32 or the frequent
23110 + page table updates on X86-64.
23111 +
23112 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23113 +
23114 pcbit= [HW,ISDN]
23115
23116 pcd. [PARIDE]
23117 diff -urNp linux-2.6.32.41/drivers/acpi/acpi_pad.c linux-2.6.32.41/drivers/acpi/acpi_pad.c
23118 --- linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23119 +++ linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23120 @@ -30,7 +30,7 @@
23121 #include <acpi/acpi_bus.h>
23122 #include <acpi/acpi_drivers.h>
23123
23124 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23125 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23126 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23127 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23128 static DEFINE_MUTEX(isolated_cpus_lock);
23129 diff -urNp linux-2.6.32.41/drivers/acpi/battery.c linux-2.6.32.41/drivers/acpi/battery.c
23130 --- linux-2.6.32.41/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23131 +++ linux-2.6.32.41/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23132 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23133 }
23134
23135 static struct battery_file {
23136 - struct file_operations ops;
23137 + const struct file_operations ops;
23138 mode_t mode;
23139 const char *name;
23140 } acpi_battery_file[] = {
23141 diff -urNp linux-2.6.32.41/drivers/acpi/dock.c linux-2.6.32.41/drivers/acpi/dock.c
23142 --- linux-2.6.32.41/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23143 +++ linux-2.6.32.41/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23144 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23145 struct list_head list;
23146 struct list_head hotplug_list;
23147 acpi_handle handle;
23148 - struct acpi_dock_ops *ops;
23149 + const struct acpi_dock_ops *ops;
23150 void *context;
23151 };
23152
23153 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23154 * the dock driver after _DCK is executed.
23155 */
23156 int
23157 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23158 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23159 void *context)
23160 {
23161 struct dock_dependent_device *dd;
23162 diff -urNp linux-2.6.32.41/drivers/acpi/osl.c linux-2.6.32.41/drivers/acpi/osl.c
23163 --- linux-2.6.32.41/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23164 +++ linux-2.6.32.41/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23165 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23166 void __iomem *virt_addr;
23167
23168 virt_addr = ioremap(phys_addr, width);
23169 + if (!virt_addr)
23170 + return AE_NO_MEMORY;
23171 if (!value)
23172 value = &dummy;
23173
23174 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23175 void __iomem *virt_addr;
23176
23177 virt_addr = ioremap(phys_addr, width);
23178 + if (!virt_addr)
23179 + return AE_NO_MEMORY;
23180
23181 switch (width) {
23182 case 8:
23183 diff -urNp linux-2.6.32.41/drivers/acpi/power_meter.c linux-2.6.32.41/drivers/acpi/power_meter.c
23184 --- linux-2.6.32.41/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23185 +++ linux-2.6.32.41/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23186 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23187 return res;
23188
23189 temp /= 1000;
23190 - if (temp < 0)
23191 - return -EINVAL;
23192
23193 mutex_lock(&resource->lock);
23194 resource->trip[attr->index - 7] = temp;
23195 diff -urNp linux-2.6.32.41/drivers/acpi/proc.c linux-2.6.32.41/drivers/acpi/proc.c
23196 --- linux-2.6.32.41/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23197 +++ linux-2.6.32.41/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23198 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23199 size_t count, loff_t * ppos)
23200 {
23201 struct list_head *node, *next;
23202 - char strbuf[5];
23203 - char str[5] = "";
23204 - unsigned int len = count;
23205 + char strbuf[5] = {0};
23206 struct acpi_device *found_dev = NULL;
23207
23208 - if (len > 4)
23209 - len = 4;
23210 - if (len < 0)
23211 - return -EFAULT;
23212 + if (count > 4)
23213 + count = 4;
23214
23215 - if (copy_from_user(strbuf, buffer, len))
23216 + if (copy_from_user(strbuf, buffer, count))
23217 return -EFAULT;
23218 - strbuf[len] = '\0';
23219 - sscanf(strbuf, "%s", str);
23220 + strbuf[count] = '\0';
23221
23222 mutex_lock(&acpi_device_lock);
23223 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23224 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23225 if (!dev->wakeup.flags.valid)
23226 continue;
23227
23228 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23229 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23230 dev->wakeup.state.enabled =
23231 dev->wakeup.state.enabled ? 0 : 1;
23232 found_dev = dev;
23233 diff -urNp linux-2.6.32.41/drivers/acpi/processor_core.c linux-2.6.32.41/drivers/acpi/processor_core.c
23234 --- linux-2.6.32.41/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23235 +++ linux-2.6.32.41/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23236 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23237 return 0;
23238 }
23239
23240 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23241 + BUG_ON(pr->id >= nr_cpu_ids);
23242
23243 /*
23244 * Buggy BIOS check
23245 diff -urNp linux-2.6.32.41/drivers/acpi/sbshc.c linux-2.6.32.41/drivers/acpi/sbshc.c
23246 --- linux-2.6.32.41/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23247 +++ linux-2.6.32.41/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23248 @@ -17,7 +17,7 @@
23249
23250 #define PREFIX "ACPI: "
23251
23252 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23253 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23254 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23255
23256 struct acpi_smb_hc {
23257 diff -urNp linux-2.6.32.41/drivers/acpi/sleep.c linux-2.6.32.41/drivers/acpi/sleep.c
23258 --- linux-2.6.32.41/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23259 +++ linux-2.6.32.41/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23260 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23261 }
23262 }
23263
23264 -static struct platform_suspend_ops acpi_suspend_ops = {
23265 +static const struct platform_suspend_ops acpi_suspend_ops = {
23266 .valid = acpi_suspend_state_valid,
23267 .begin = acpi_suspend_begin,
23268 .prepare_late = acpi_pm_prepare,
23269 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23270 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23271 * been requested.
23272 */
23273 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23274 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23275 .valid = acpi_suspend_state_valid,
23276 .begin = acpi_suspend_begin_old,
23277 .prepare_late = acpi_pm_disable_gpes,
23278 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23279 acpi_enable_all_runtime_gpes();
23280 }
23281
23282 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23283 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23284 .begin = acpi_hibernation_begin,
23285 .end = acpi_pm_end,
23286 .pre_snapshot = acpi_hibernation_pre_snapshot,
23287 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23288 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23289 * been requested.
23290 */
23291 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23292 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23293 .begin = acpi_hibernation_begin_old,
23294 .end = acpi_pm_end,
23295 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23296 diff -urNp linux-2.6.32.41/drivers/acpi/video.c linux-2.6.32.41/drivers/acpi/video.c
23297 --- linux-2.6.32.41/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23298 +++ linux-2.6.32.41/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23299 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23300 vd->brightness->levels[request_level]);
23301 }
23302
23303 -static struct backlight_ops acpi_backlight_ops = {
23304 +static const struct backlight_ops acpi_backlight_ops = {
23305 .get_brightness = acpi_video_get_brightness,
23306 .update_status = acpi_video_set_brightness,
23307 };
23308 diff -urNp linux-2.6.32.41/drivers/ata/ahci.c linux-2.6.32.41/drivers/ata/ahci.c
23309 --- linux-2.6.32.41/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23310 +++ linux-2.6.32.41/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23311 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23312 .sdev_attrs = ahci_sdev_attrs,
23313 };
23314
23315 -static struct ata_port_operations ahci_ops = {
23316 +static const struct ata_port_operations ahci_ops = {
23317 .inherits = &sata_pmp_port_ops,
23318
23319 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23320 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23321 .port_stop = ahci_port_stop,
23322 };
23323
23324 -static struct ata_port_operations ahci_vt8251_ops = {
23325 +static const struct ata_port_operations ahci_vt8251_ops = {
23326 .inherits = &ahci_ops,
23327 .hardreset = ahci_vt8251_hardreset,
23328 };
23329
23330 -static struct ata_port_operations ahci_p5wdh_ops = {
23331 +static const struct ata_port_operations ahci_p5wdh_ops = {
23332 .inherits = &ahci_ops,
23333 .hardreset = ahci_p5wdh_hardreset,
23334 };
23335
23336 -static struct ata_port_operations ahci_sb600_ops = {
23337 +static const struct ata_port_operations ahci_sb600_ops = {
23338 .inherits = &ahci_ops,
23339 .softreset = ahci_sb600_softreset,
23340 .pmp_softreset = ahci_sb600_softreset,
23341 diff -urNp linux-2.6.32.41/drivers/ata/ata_generic.c linux-2.6.32.41/drivers/ata/ata_generic.c
23342 --- linux-2.6.32.41/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23343 +++ linux-2.6.32.41/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23344 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23345 ATA_BMDMA_SHT(DRV_NAME),
23346 };
23347
23348 -static struct ata_port_operations generic_port_ops = {
23349 +static const struct ata_port_operations generic_port_ops = {
23350 .inherits = &ata_bmdma_port_ops,
23351 .cable_detect = ata_cable_unknown,
23352 .set_mode = generic_set_mode,
23353 diff -urNp linux-2.6.32.41/drivers/ata/ata_piix.c linux-2.6.32.41/drivers/ata/ata_piix.c
23354 --- linux-2.6.32.41/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23355 +++ linux-2.6.32.41/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23356 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23357 ATA_BMDMA_SHT(DRV_NAME),
23358 };
23359
23360 -static struct ata_port_operations piix_pata_ops = {
23361 +static const struct ata_port_operations piix_pata_ops = {
23362 .inherits = &ata_bmdma32_port_ops,
23363 .cable_detect = ata_cable_40wire,
23364 .set_piomode = piix_set_piomode,
23365 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23366 .prereset = piix_pata_prereset,
23367 };
23368
23369 -static struct ata_port_operations piix_vmw_ops = {
23370 +static const struct ata_port_operations piix_vmw_ops = {
23371 .inherits = &piix_pata_ops,
23372 .bmdma_status = piix_vmw_bmdma_status,
23373 };
23374
23375 -static struct ata_port_operations ich_pata_ops = {
23376 +static const struct ata_port_operations ich_pata_ops = {
23377 .inherits = &piix_pata_ops,
23378 .cable_detect = ich_pata_cable_detect,
23379 .set_dmamode = ich_set_dmamode,
23380 };
23381
23382 -static struct ata_port_operations piix_sata_ops = {
23383 +static const struct ata_port_operations piix_sata_ops = {
23384 .inherits = &ata_bmdma_port_ops,
23385 };
23386
23387 -static struct ata_port_operations piix_sidpr_sata_ops = {
23388 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23389 .inherits = &piix_sata_ops,
23390 .hardreset = sata_std_hardreset,
23391 .scr_read = piix_sidpr_scr_read,
23392 diff -urNp linux-2.6.32.41/drivers/ata/libata-acpi.c linux-2.6.32.41/drivers/ata/libata-acpi.c
23393 --- linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23394 +++ linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23395 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23396 ata_acpi_uevent(dev->link->ap, dev, event);
23397 }
23398
23399 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23400 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23401 .handler = ata_acpi_dev_notify_dock,
23402 .uevent = ata_acpi_dev_uevent,
23403 };
23404
23405 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23406 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23407 .handler = ata_acpi_ap_notify_dock,
23408 .uevent = ata_acpi_ap_uevent,
23409 };
23410 diff -urNp linux-2.6.32.41/drivers/ata/libata-core.c linux-2.6.32.41/drivers/ata/libata-core.c
23411 --- linux-2.6.32.41/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23412 +++ linux-2.6.32.41/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23413 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23414 struct ata_port *ap;
23415 unsigned int tag;
23416
23417 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23418 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23419 ap = qc->ap;
23420
23421 qc->flags = 0;
23422 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23423 struct ata_port *ap;
23424 struct ata_link *link;
23425
23426 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23427 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23428 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23429 ap = qc->ap;
23430 link = qc->dev->link;
23431 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23432 * LOCKING:
23433 * None.
23434 */
23435 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23436 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23437 {
23438 static DEFINE_SPINLOCK(lock);
23439 const struct ata_port_operations *cur;
23440 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23441 return;
23442
23443 spin_lock(&lock);
23444 + pax_open_kernel();
23445
23446 for (cur = ops->inherits; cur; cur = cur->inherits) {
23447 void **inherit = (void **)cur;
23448 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23449 if (IS_ERR(*pp))
23450 *pp = NULL;
23451
23452 - ops->inherits = NULL;
23453 + ((struct ata_port_operations *)ops)->inherits = NULL;
23454
23455 + pax_close_kernel();
23456 spin_unlock(&lock);
23457 }
23458
23459 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23460 */
23461 /* KILLME - the only user left is ipr */
23462 void ata_host_init(struct ata_host *host, struct device *dev,
23463 - unsigned long flags, struct ata_port_operations *ops)
23464 + unsigned long flags, const struct ata_port_operations *ops)
23465 {
23466 spin_lock_init(&host->lock);
23467 host->dev = dev;
23468 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23469 /* truly dummy */
23470 }
23471
23472 -struct ata_port_operations ata_dummy_port_ops = {
23473 +const struct ata_port_operations ata_dummy_port_ops = {
23474 .qc_prep = ata_noop_qc_prep,
23475 .qc_issue = ata_dummy_qc_issue,
23476 .error_handler = ata_dummy_error_handler,
23477 diff -urNp linux-2.6.32.41/drivers/ata/libata-eh.c linux-2.6.32.41/drivers/ata/libata-eh.c
23478 --- linux-2.6.32.41/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23479 +++ linux-2.6.32.41/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23480 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23481 {
23482 struct ata_link *link;
23483
23484 + pax_track_stack();
23485 +
23486 ata_for_each_link(link, ap, HOST_FIRST)
23487 ata_eh_link_report(link);
23488 }
23489 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23490 */
23491 void ata_std_error_handler(struct ata_port *ap)
23492 {
23493 - struct ata_port_operations *ops = ap->ops;
23494 + const struct ata_port_operations *ops = ap->ops;
23495 ata_reset_fn_t hardreset = ops->hardreset;
23496
23497 /* ignore built-in hardreset if SCR access is not available */
23498 diff -urNp linux-2.6.32.41/drivers/ata/libata-pmp.c linux-2.6.32.41/drivers/ata/libata-pmp.c
23499 --- linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23500 +++ linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23501 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23502 */
23503 static int sata_pmp_eh_recover(struct ata_port *ap)
23504 {
23505 - struct ata_port_operations *ops = ap->ops;
23506 + const struct ata_port_operations *ops = ap->ops;
23507 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23508 struct ata_link *pmp_link = &ap->link;
23509 struct ata_device *pmp_dev = pmp_link->device;
23510 diff -urNp linux-2.6.32.41/drivers/ata/pata_acpi.c linux-2.6.32.41/drivers/ata/pata_acpi.c
23511 --- linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23512 +++ linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23513 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23514 ATA_BMDMA_SHT(DRV_NAME),
23515 };
23516
23517 -static struct ata_port_operations pacpi_ops = {
23518 +static const struct ata_port_operations pacpi_ops = {
23519 .inherits = &ata_bmdma_port_ops,
23520 .qc_issue = pacpi_qc_issue,
23521 .cable_detect = pacpi_cable_detect,
23522 diff -urNp linux-2.6.32.41/drivers/ata/pata_ali.c linux-2.6.32.41/drivers/ata/pata_ali.c
23523 --- linux-2.6.32.41/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23524 +++ linux-2.6.32.41/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23525 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23526 * Port operations for PIO only ALi
23527 */
23528
23529 -static struct ata_port_operations ali_early_port_ops = {
23530 +static const struct ata_port_operations ali_early_port_ops = {
23531 .inherits = &ata_sff_port_ops,
23532 .cable_detect = ata_cable_40wire,
23533 .set_piomode = ali_set_piomode,
23534 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23535 * Port operations for DMA capable ALi without cable
23536 * detect
23537 */
23538 -static struct ata_port_operations ali_20_port_ops = {
23539 +static const struct ata_port_operations ali_20_port_ops = {
23540 .inherits = &ali_dma_base_ops,
23541 .cable_detect = ata_cable_40wire,
23542 .mode_filter = ali_20_filter,
23543 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23544 /*
23545 * Port operations for DMA capable ALi with cable detect
23546 */
23547 -static struct ata_port_operations ali_c2_port_ops = {
23548 +static const struct ata_port_operations ali_c2_port_ops = {
23549 .inherits = &ali_dma_base_ops,
23550 .check_atapi_dma = ali_check_atapi_dma,
23551 .cable_detect = ali_c2_cable_detect,
23552 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23553 /*
23554 * Port operations for DMA capable ALi with cable detect
23555 */
23556 -static struct ata_port_operations ali_c4_port_ops = {
23557 +static const struct ata_port_operations ali_c4_port_ops = {
23558 .inherits = &ali_dma_base_ops,
23559 .check_atapi_dma = ali_check_atapi_dma,
23560 .cable_detect = ali_c2_cable_detect,
23561 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23562 /*
23563 * Port operations for DMA capable ALi with cable detect and LBA48
23564 */
23565 -static struct ata_port_operations ali_c5_port_ops = {
23566 +static const struct ata_port_operations ali_c5_port_ops = {
23567 .inherits = &ali_dma_base_ops,
23568 .check_atapi_dma = ali_check_atapi_dma,
23569 .dev_config = ali_warn_atapi_dma,
23570 diff -urNp linux-2.6.32.41/drivers/ata/pata_amd.c linux-2.6.32.41/drivers/ata/pata_amd.c
23571 --- linux-2.6.32.41/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23572 +++ linux-2.6.32.41/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23573 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23574 .prereset = amd_pre_reset,
23575 };
23576
23577 -static struct ata_port_operations amd33_port_ops = {
23578 +static const struct ata_port_operations amd33_port_ops = {
23579 .inherits = &amd_base_port_ops,
23580 .cable_detect = ata_cable_40wire,
23581 .set_piomode = amd33_set_piomode,
23582 .set_dmamode = amd33_set_dmamode,
23583 };
23584
23585 -static struct ata_port_operations amd66_port_ops = {
23586 +static const struct ata_port_operations amd66_port_ops = {
23587 .inherits = &amd_base_port_ops,
23588 .cable_detect = ata_cable_unknown,
23589 .set_piomode = amd66_set_piomode,
23590 .set_dmamode = amd66_set_dmamode,
23591 };
23592
23593 -static struct ata_port_operations amd100_port_ops = {
23594 +static const struct ata_port_operations amd100_port_ops = {
23595 .inherits = &amd_base_port_ops,
23596 .cable_detect = ata_cable_unknown,
23597 .set_piomode = amd100_set_piomode,
23598 .set_dmamode = amd100_set_dmamode,
23599 };
23600
23601 -static struct ata_port_operations amd133_port_ops = {
23602 +static const struct ata_port_operations amd133_port_ops = {
23603 .inherits = &amd_base_port_ops,
23604 .cable_detect = amd_cable_detect,
23605 .set_piomode = amd133_set_piomode,
23606 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23607 .host_stop = nv_host_stop,
23608 };
23609
23610 -static struct ata_port_operations nv100_port_ops = {
23611 +static const struct ata_port_operations nv100_port_ops = {
23612 .inherits = &nv_base_port_ops,
23613 .set_piomode = nv100_set_piomode,
23614 .set_dmamode = nv100_set_dmamode,
23615 };
23616
23617 -static struct ata_port_operations nv133_port_ops = {
23618 +static const struct ata_port_operations nv133_port_ops = {
23619 .inherits = &nv_base_port_ops,
23620 .set_piomode = nv133_set_piomode,
23621 .set_dmamode = nv133_set_dmamode,
23622 diff -urNp linux-2.6.32.41/drivers/ata/pata_artop.c linux-2.6.32.41/drivers/ata/pata_artop.c
23623 --- linux-2.6.32.41/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23624 +++ linux-2.6.32.41/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23625 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23626 ATA_BMDMA_SHT(DRV_NAME),
23627 };
23628
23629 -static struct ata_port_operations artop6210_ops = {
23630 +static const struct ata_port_operations artop6210_ops = {
23631 .inherits = &ata_bmdma_port_ops,
23632 .cable_detect = ata_cable_40wire,
23633 .set_piomode = artop6210_set_piomode,
23634 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23635 .qc_defer = artop6210_qc_defer,
23636 };
23637
23638 -static struct ata_port_operations artop6260_ops = {
23639 +static const struct ata_port_operations artop6260_ops = {
23640 .inherits = &ata_bmdma_port_ops,
23641 .cable_detect = artop6260_cable_detect,
23642 .set_piomode = artop6260_set_piomode,
23643 diff -urNp linux-2.6.32.41/drivers/ata/pata_at32.c linux-2.6.32.41/drivers/ata/pata_at32.c
23644 --- linux-2.6.32.41/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23645 +++ linux-2.6.32.41/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23646 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23647 ATA_PIO_SHT(DRV_NAME),
23648 };
23649
23650 -static struct ata_port_operations at32_port_ops = {
23651 +static const struct ata_port_operations at32_port_ops = {
23652 .inherits = &ata_sff_port_ops,
23653 .cable_detect = ata_cable_40wire,
23654 .set_piomode = pata_at32_set_piomode,
23655 diff -urNp linux-2.6.32.41/drivers/ata/pata_at91.c linux-2.6.32.41/drivers/ata/pata_at91.c
23656 --- linux-2.6.32.41/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23657 +++ linux-2.6.32.41/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23658 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23659 ATA_PIO_SHT(DRV_NAME),
23660 };
23661
23662 -static struct ata_port_operations pata_at91_port_ops = {
23663 +static const struct ata_port_operations pata_at91_port_ops = {
23664 .inherits = &ata_sff_port_ops,
23665
23666 .sff_data_xfer = pata_at91_data_xfer_noirq,
23667 diff -urNp linux-2.6.32.41/drivers/ata/pata_atiixp.c linux-2.6.32.41/drivers/ata/pata_atiixp.c
23668 --- linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23669 +++ linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23670 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23671 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23672 };
23673
23674 -static struct ata_port_operations atiixp_port_ops = {
23675 +static const struct ata_port_operations atiixp_port_ops = {
23676 .inherits = &ata_bmdma_port_ops,
23677
23678 .qc_prep = ata_sff_dumb_qc_prep,
23679 diff -urNp linux-2.6.32.41/drivers/ata/pata_atp867x.c linux-2.6.32.41/drivers/ata/pata_atp867x.c
23680 --- linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23681 +++ linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23682 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23683 ATA_BMDMA_SHT(DRV_NAME),
23684 };
23685
23686 -static struct ata_port_operations atp867x_ops = {
23687 +static const struct ata_port_operations atp867x_ops = {
23688 .inherits = &ata_bmdma_port_ops,
23689 .cable_detect = atp867x_cable_detect,
23690 .set_piomode = atp867x_set_piomode,
23691 diff -urNp linux-2.6.32.41/drivers/ata/pata_bf54x.c linux-2.6.32.41/drivers/ata/pata_bf54x.c
23692 --- linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23693 +++ linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23694 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23695 .dma_boundary = ATA_DMA_BOUNDARY,
23696 };
23697
23698 -static struct ata_port_operations bfin_pata_ops = {
23699 +static const struct ata_port_operations bfin_pata_ops = {
23700 .inherits = &ata_sff_port_ops,
23701
23702 .set_piomode = bfin_set_piomode,
23703 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd640.c linux-2.6.32.41/drivers/ata/pata_cmd640.c
23704 --- linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23705 +++ linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23706 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23707 ATA_BMDMA_SHT(DRV_NAME),
23708 };
23709
23710 -static struct ata_port_operations cmd640_port_ops = {
23711 +static const struct ata_port_operations cmd640_port_ops = {
23712 .inherits = &ata_bmdma_port_ops,
23713 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23714 .sff_data_xfer = ata_sff_data_xfer_noirq,
23715 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd64x.c linux-2.6.32.41/drivers/ata/pata_cmd64x.c
23716 --- linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-03-27 14:31:47.000000000 -0400
23717 +++ linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-04-17 15:56:46.000000000 -0400
23718 @@ -275,18 +275,18 @@ static const struct ata_port_operations
23719 .set_dmamode = cmd64x_set_dmamode,
23720 };
23721
23722 -static struct ata_port_operations cmd64x_port_ops = {
23723 +static const struct ata_port_operations cmd64x_port_ops = {
23724 .inherits = &cmd64x_base_ops,
23725 .cable_detect = ata_cable_40wire,
23726 };
23727
23728 -static struct ata_port_operations cmd646r1_port_ops = {
23729 +static const struct ata_port_operations cmd646r1_port_ops = {
23730 .inherits = &cmd64x_base_ops,
23731 .bmdma_stop = cmd646r1_bmdma_stop,
23732 .cable_detect = ata_cable_40wire,
23733 };
23734
23735 -static struct ata_port_operations cmd648_port_ops = {
23736 +static const struct ata_port_operations cmd648_port_ops = {
23737 .inherits = &cmd64x_base_ops,
23738 .bmdma_stop = cmd648_bmdma_stop,
23739 .cable_detect = cmd648_cable_detect,
23740 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5520.c linux-2.6.32.41/drivers/ata/pata_cs5520.c
23741 --- linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23742 +++ linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23743 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23744 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23745 };
23746
23747 -static struct ata_port_operations cs5520_port_ops = {
23748 +static const struct ata_port_operations cs5520_port_ops = {
23749 .inherits = &ata_bmdma_port_ops,
23750 .qc_prep = ata_sff_dumb_qc_prep,
23751 .cable_detect = ata_cable_40wire,
23752 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5530.c linux-2.6.32.41/drivers/ata/pata_cs5530.c
23753 --- linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23754 +++ linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23755 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23756 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23757 };
23758
23759 -static struct ata_port_operations cs5530_port_ops = {
23760 +static const struct ata_port_operations cs5530_port_ops = {
23761 .inherits = &ata_bmdma_port_ops,
23762
23763 .qc_prep = ata_sff_dumb_qc_prep,
23764 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5535.c linux-2.6.32.41/drivers/ata/pata_cs5535.c
23765 --- linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23766 +++ linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23767 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23768 ATA_BMDMA_SHT(DRV_NAME),
23769 };
23770
23771 -static struct ata_port_operations cs5535_port_ops = {
23772 +static const struct ata_port_operations cs5535_port_ops = {
23773 .inherits = &ata_bmdma_port_ops,
23774 .cable_detect = cs5535_cable_detect,
23775 .set_piomode = cs5535_set_piomode,
23776 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5536.c linux-2.6.32.41/drivers/ata/pata_cs5536.c
23777 --- linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23778 +++ linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23779 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23780 ATA_BMDMA_SHT(DRV_NAME),
23781 };
23782
23783 -static struct ata_port_operations cs5536_port_ops = {
23784 +static const struct ata_port_operations cs5536_port_ops = {
23785 .inherits = &ata_bmdma_port_ops,
23786 .cable_detect = cs5536_cable_detect,
23787 .set_piomode = cs5536_set_piomode,
23788 diff -urNp linux-2.6.32.41/drivers/ata/pata_cypress.c linux-2.6.32.41/drivers/ata/pata_cypress.c
23789 --- linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23790 +++ linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23791 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23792 ATA_BMDMA_SHT(DRV_NAME),
23793 };
23794
23795 -static struct ata_port_operations cy82c693_port_ops = {
23796 +static const struct ata_port_operations cy82c693_port_ops = {
23797 .inherits = &ata_bmdma_port_ops,
23798 .cable_detect = ata_cable_40wire,
23799 .set_piomode = cy82c693_set_piomode,
23800 diff -urNp linux-2.6.32.41/drivers/ata/pata_efar.c linux-2.6.32.41/drivers/ata/pata_efar.c
23801 --- linux-2.6.32.41/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23802 +++ linux-2.6.32.41/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23803 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23804 ATA_BMDMA_SHT(DRV_NAME),
23805 };
23806
23807 -static struct ata_port_operations efar_ops = {
23808 +static const struct ata_port_operations efar_ops = {
23809 .inherits = &ata_bmdma_port_ops,
23810 .cable_detect = efar_cable_detect,
23811 .set_piomode = efar_set_piomode,
23812 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt366.c linux-2.6.32.41/drivers/ata/pata_hpt366.c
23813 --- linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-03-27 14:31:47.000000000 -0400
23814 +++ linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-04-17 15:56:46.000000000 -0400
23815 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23816 * Configuration for HPT366/68
23817 */
23818
23819 -static struct ata_port_operations hpt366_port_ops = {
23820 +static const struct ata_port_operations hpt366_port_ops = {
23821 .inherits = &ata_bmdma_port_ops,
23822 .cable_detect = hpt36x_cable_detect,
23823 .mode_filter = hpt366_filter,
23824 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt37x.c linux-2.6.32.41/drivers/ata/pata_hpt37x.c
23825 --- linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-03-27 14:31:47.000000000 -0400
23826 +++ linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-04-17 15:56:46.000000000 -0400
23827 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23828 * Configuration for HPT370
23829 */
23830
23831 -static struct ata_port_operations hpt370_port_ops = {
23832 +static const struct ata_port_operations hpt370_port_ops = {
23833 .inherits = &ata_bmdma_port_ops,
23834
23835 .bmdma_stop = hpt370_bmdma_stop,
23836 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23837 * Configuration for HPT370A. Close to 370 but less filters
23838 */
23839
23840 -static struct ata_port_operations hpt370a_port_ops = {
23841 +static const struct ata_port_operations hpt370a_port_ops = {
23842 .inherits = &hpt370_port_ops,
23843 .mode_filter = hpt370a_filter,
23844 };
23845 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23846 * and DMA mode setting functionality.
23847 */
23848
23849 -static struct ata_port_operations hpt372_port_ops = {
23850 +static const struct ata_port_operations hpt372_port_ops = {
23851 .inherits = &ata_bmdma_port_ops,
23852
23853 .bmdma_stop = hpt37x_bmdma_stop,
23854 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23855 * but we have a different cable detection procedure for function 1.
23856 */
23857
23858 -static struct ata_port_operations hpt374_fn1_port_ops = {
23859 +static const struct ata_port_operations hpt374_fn1_port_ops = {
23860 .inherits = &hpt372_port_ops,
23861 .prereset = hpt374_fn1_pre_reset,
23862 };
23863 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c
23864 --- linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-03-27 14:31:47.000000000 -0400
23865 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-04-17 15:56:46.000000000 -0400
23866 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
23867 * Configuration for HPT3x2n.
23868 */
23869
23870 -static struct ata_port_operations hpt3x2n_port_ops = {
23871 +static const struct ata_port_operations hpt3x2n_port_ops = {
23872 .inherits = &ata_bmdma_port_ops,
23873
23874 .bmdma_stop = hpt3x2n_bmdma_stop,
23875 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x3.c linux-2.6.32.41/drivers/ata/pata_hpt3x3.c
23876 --- linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
23877 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
23878 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
23879 ATA_BMDMA_SHT(DRV_NAME),
23880 };
23881
23882 -static struct ata_port_operations hpt3x3_port_ops = {
23883 +static const struct ata_port_operations hpt3x3_port_ops = {
23884 .inherits = &ata_bmdma_port_ops,
23885 .cable_detect = ata_cable_40wire,
23886 .set_piomode = hpt3x3_set_piomode,
23887 diff -urNp linux-2.6.32.41/drivers/ata/pata_icside.c linux-2.6.32.41/drivers/ata/pata_icside.c
23888 --- linux-2.6.32.41/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
23889 +++ linux-2.6.32.41/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
23890 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
23891 }
23892 }
23893
23894 -static struct ata_port_operations pata_icside_port_ops = {
23895 +static const struct ata_port_operations pata_icside_port_ops = {
23896 .inherits = &ata_sff_port_ops,
23897 /* no need to build any PRD tables for DMA */
23898 .qc_prep = ata_noop_qc_prep,
23899 diff -urNp linux-2.6.32.41/drivers/ata/pata_isapnp.c linux-2.6.32.41/drivers/ata/pata_isapnp.c
23900 --- linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
23901 +++ linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
23902 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
23903 ATA_PIO_SHT(DRV_NAME),
23904 };
23905
23906 -static struct ata_port_operations isapnp_port_ops = {
23907 +static const struct ata_port_operations isapnp_port_ops = {
23908 .inherits = &ata_sff_port_ops,
23909 .cable_detect = ata_cable_40wire,
23910 };
23911
23912 -static struct ata_port_operations isapnp_noalt_port_ops = {
23913 +static const struct ata_port_operations isapnp_noalt_port_ops = {
23914 .inherits = &ata_sff_port_ops,
23915 .cable_detect = ata_cable_40wire,
23916 /* No altstatus so we don't want to use the lost interrupt poll */
23917 diff -urNp linux-2.6.32.41/drivers/ata/pata_it8213.c linux-2.6.32.41/drivers/ata/pata_it8213.c
23918 --- linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
23919 +++ linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
23920 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
23921 };
23922
23923
23924 -static struct ata_port_operations it8213_ops = {
23925 +static const struct ata_port_operations it8213_ops = {
23926 .inherits = &ata_bmdma_port_ops,
23927 .cable_detect = it8213_cable_detect,
23928 .set_piomode = it8213_set_piomode,
23929 diff -urNp linux-2.6.32.41/drivers/ata/pata_it821x.c linux-2.6.32.41/drivers/ata/pata_it821x.c
23930 --- linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
23931 +++ linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
23932 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
23933 ATA_BMDMA_SHT(DRV_NAME),
23934 };
23935
23936 -static struct ata_port_operations it821x_smart_port_ops = {
23937 +static const struct ata_port_operations it821x_smart_port_ops = {
23938 .inherits = &ata_bmdma_port_ops,
23939
23940 .check_atapi_dma= it821x_check_atapi_dma,
23941 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
23942 .port_start = it821x_port_start,
23943 };
23944
23945 -static struct ata_port_operations it821x_passthru_port_ops = {
23946 +static const struct ata_port_operations it821x_passthru_port_ops = {
23947 .inherits = &ata_bmdma_port_ops,
23948
23949 .check_atapi_dma= it821x_check_atapi_dma,
23950 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
23951 .port_start = it821x_port_start,
23952 };
23953
23954 -static struct ata_port_operations it821x_rdc_port_ops = {
23955 +static const struct ata_port_operations it821x_rdc_port_ops = {
23956 .inherits = &ata_bmdma_port_ops,
23957
23958 .check_atapi_dma= it821x_check_atapi_dma,
23959 diff -urNp linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c
23960 --- linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
23961 +++ linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
23962 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
23963 ATA_PIO_SHT(DRV_NAME),
23964 };
23965
23966 -static struct ata_port_operations ixp4xx_port_ops = {
23967 +static const struct ata_port_operations ixp4xx_port_ops = {
23968 .inherits = &ata_sff_port_ops,
23969 .sff_data_xfer = ixp4xx_mmio_data_xfer,
23970 .cable_detect = ata_cable_40wire,
23971 diff -urNp linux-2.6.32.41/drivers/ata/pata_jmicron.c linux-2.6.32.41/drivers/ata/pata_jmicron.c
23972 --- linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
23973 +++ linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
23974 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
23975 ATA_BMDMA_SHT(DRV_NAME),
23976 };
23977
23978 -static struct ata_port_operations jmicron_ops = {
23979 +static const struct ata_port_operations jmicron_ops = {
23980 .inherits = &ata_bmdma_port_ops,
23981 .prereset = jmicron_pre_reset,
23982 };
23983 diff -urNp linux-2.6.32.41/drivers/ata/pata_legacy.c linux-2.6.32.41/drivers/ata/pata_legacy.c
23984 --- linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
23985 +++ linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
23986 @@ -106,7 +106,7 @@ struct legacy_probe {
23987
23988 struct legacy_controller {
23989 const char *name;
23990 - struct ata_port_operations *ops;
23991 + const struct ata_port_operations *ops;
23992 unsigned int pio_mask;
23993 unsigned int flags;
23994 unsigned int pflags;
23995 @@ -223,12 +223,12 @@ static const struct ata_port_operations
23996 * pio_mask as well.
23997 */
23998
23999 -static struct ata_port_operations simple_port_ops = {
24000 +static const struct ata_port_operations simple_port_ops = {
24001 .inherits = &legacy_base_port_ops,
24002 .sff_data_xfer = ata_sff_data_xfer_noirq,
24003 };
24004
24005 -static struct ata_port_operations legacy_port_ops = {
24006 +static const struct ata_port_operations legacy_port_ops = {
24007 .inherits = &legacy_base_port_ops,
24008 .sff_data_xfer = ata_sff_data_xfer_noirq,
24009 .set_mode = legacy_set_mode,
24010 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24011 return buflen;
24012 }
24013
24014 -static struct ata_port_operations pdc20230_port_ops = {
24015 +static const struct ata_port_operations pdc20230_port_ops = {
24016 .inherits = &legacy_base_port_ops,
24017 .set_piomode = pdc20230_set_piomode,
24018 .sff_data_xfer = pdc_data_xfer_vlb,
24019 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24020 ioread8(ap->ioaddr.status_addr);
24021 }
24022
24023 -static struct ata_port_operations ht6560a_port_ops = {
24024 +static const struct ata_port_operations ht6560a_port_ops = {
24025 .inherits = &legacy_base_port_ops,
24026 .set_piomode = ht6560a_set_piomode,
24027 };
24028 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24029 ioread8(ap->ioaddr.status_addr);
24030 }
24031
24032 -static struct ata_port_operations ht6560b_port_ops = {
24033 +static const struct ata_port_operations ht6560b_port_ops = {
24034 .inherits = &legacy_base_port_ops,
24035 .set_piomode = ht6560b_set_piomode,
24036 };
24037 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24038 }
24039
24040
24041 -static struct ata_port_operations opti82c611a_port_ops = {
24042 +static const struct ata_port_operations opti82c611a_port_ops = {
24043 .inherits = &legacy_base_port_ops,
24044 .set_piomode = opti82c611a_set_piomode,
24045 };
24046 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24047 return ata_sff_qc_issue(qc);
24048 }
24049
24050 -static struct ata_port_operations opti82c46x_port_ops = {
24051 +static const struct ata_port_operations opti82c46x_port_ops = {
24052 .inherits = &legacy_base_port_ops,
24053 .set_piomode = opti82c46x_set_piomode,
24054 .qc_issue = opti82c46x_qc_issue,
24055 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24056 return 0;
24057 }
24058
24059 -static struct ata_port_operations qdi6500_port_ops = {
24060 +static const struct ata_port_operations qdi6500_port_ops = {
24061 .inherits = &legacy_base_port_ops,
24062 .set_piomode = qdi6500_set_piomode,
24063 .qc_issue = qdi_qc_issue,
24064 .sff_data_xfer = vlb32_data_xfer,
24065 };
24066
24067 -static struct ata_port_operations qdi6580_port_ops = {
24068 +static const struct ata_port_operations qdi6580_port_ops = {
24069 .inherits = &legacy_base_port_ops,
24070 .set_piomode = qdi6580_set_piomode,
24071 .sff_data_xfer = vlb32_data_xfer,
24072 };
24073
24074 -static struct ata_port_operations qdi6580dp_port_ops = {
24075 +static const struct ata_port_operations qdi6580dp_port_ops = {
24076 .inherits = &legacy_base_port_ops,
24077 .set_piomode = qdi6580dp_set_piomode,
24078 .sff_data_xfer = vlb32_data_xfer,
24079 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24080 return 0;
24081 }
24082
24083 -static struct ata_port_operations winbond_port_ops = {
24084 +static const struct ata_port_operations winbond_port_ops = {
24085 .inherits = &legacy_base_port_ops,
24086 .set_piomode = winbond_set_piomode,
24087 .sff_data_xfer = vlb32_data_xfer,
24088 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24089 int pio_modes = controller->pio_mask;
24090 unsigned long io = probe->port;
24091 u32 mask = (1 << probe->slot);
24092 - struct ata_port_operations *ops = controller->ops;
24093 + const struct ata_port_operations *ops = controller->ops;
24094 struct legacy_data *ld = &legacy_data[probe->slot];
24095 struct ata_host *host = NULL;
24096 struct ata_port *ap;
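pata_legacy is one of the few drivers in this run where the change is more than a one-line qualifier: the ops pointer held in the legacy_controller descriptor and the local ops variable in legacy_init_one() both become pointers to const, so the address of a read-only table can flow through them without dropping the qualifier. The same thing happens to the local ops pointer in pata_pcmcia further down. A standalone sketch of the pattern, with invented struct and table names (not the driver's own):

/* standalone sketch of the pata_legacy pattern: a descriptor table whose
 * entries point at read-only ops tables; all names are illustrative only */
#include <stdio.h>

struct ops { const char *name; };

static const struct ops simple_ops = { "simple" };
static const struct ops qdi_ops    = { "qdi"    };

struct controller {
	const char       *name;
	const struct ops *ops;      /* pointer to const: the table stays read-only */
	unsigned int      pio_mask;
};

static const struct controller controllers[] = {
	{ "ISA generic", &simple_ops, 0x1f },
	{ "QDI6500",     &qdi_ops,    0x07 },
};

int main(void)
{
	/* the local pointer is still reassignable; only what it points at is const */
	const struct ops *ops = controllers[1].ops;
	printf("probing with %s ops\n", ops->name);
	ops = controllers[0].ops;
	printf("falling back to %s ops\n", ops->name);
	return 0;
}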
24097 diff -urNp linux-2.6.32.41/drivers/ata/pata_marvell.c linux-2.6.32.41/drivers/ata/pata_marvell.c
24098 --- linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24099 +++ linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24100 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24101 ATA_BMDMA_SHT(DRV_NAME),
24102 };
24103
24104 -static struct ata_port_operations marvell_ops = {
24105 +static const struct ata_port_operations marvell_ops = {
24106 .inherits = &ata_bmdma_port_ops,
24107 .cable_detect = marvell_cable_detect,
24108 .prereset = marvell_pre_reset,
24109 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpc52xx.c linux-2.6.32.41/drivers/ata/pata_mpc52xx.c
24110 --- linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24111 +++ linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24112 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24113 ATA_PIO_SHT(DRV_NAME),
24114 };
24115
24116 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24117 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24118 .inherits = &ata_bmdma_port_ops,
24119 .sff_dev_select = mpc52xx_ata_dev_select,
24120 .set_piomode = mpc52xx_ata_set_piomode,
24121 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpiix.c linux-2.6.32.41/drivers/ata/pata_mpiix.c
24122 --- linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24123 +++ linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24124 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24125 ATA_PIO_SHT(DRV_NAME),
24126 };
24127
24128 -static struct ata_port_operations mpiix_port_ops = {
24129 +static const struct ata_port_operations mpiix_port_ops = {
24130 .inherits = &ata_sff_port_ops,
24131 .qc_issue = mpiix_qc_issue,
24132 .cable_detect = ata_cable_40wire,
24133 diff -urNp linux-2.6.32.41/drivers/ata/pata_netcell.c linux-2.6.32.41/drivers/ata/pata_netcell.c
24134 --- linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24135 +++ linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24136 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24137 ATA_BMDMA_SHT(DRV_NAME),
24138 };
24139
24140 -static struct ata_port_operations netcell_ops = {
24141 +static const struct ata_port_operations netcell_ops = {
24142 .inherits = &ata_bmdma_port_ops,
24143 .cable_detect = ata_cable_80wire,
24144 .read_id = netcell_read_id,
24145 diff -urNp linux-2.6.32.41/drivers/ata/pata_ninja32.c linux-2.6.32.41/drivers/ata/pata_ninja32.c
24146 --- linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24147 +++ linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24148 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24149 ATA_BMDMA_SHT(DRV_NAME),
24150 };
24151
24152 -static struct ata_port_operations ninja32_port_ops = {
24153 +static const struct ata_port_operations ninja32_port_ops = {
24154 .inherits = &ata_bmdma_port_ops,
24155 .sff_dev_select = ninja32_dev_select,
24156 .cable_detect = ata_cable_40wire,
24157 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87410.c linux-2.6.32.41/drivers/ata/pata_ns87410.c
24158 --- linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24159 +++ linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24160 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24161 ATA_PIO_SHT(DRV_NAME),
24162 };
24163
24164 -static struct ata_port_operations ns87410_port_ops = {
24165 +static const struct ata_port_operations ns87410_port_ops = {
24166 .inherits = &ata_sff_port_ops,
24167 .qc_issue = ns87410_qc_issue,
24168 .cable_detect = ata_cable_40wire,
24169 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87415.c linux-2.6.32.41/drivers/ata/pata_ns87415.c
24170 --- linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24171 +++ linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24172 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24173 }
24174 #endif /* 87560 SuperIO Support */
24175
24176 -static struct ata_port_operations ns87415_pata_ops = {
24177 +static const struct ata_port_operations ns87415_pata_ops = {
24178 .inherits = &ata_bmdma_port_ops,
24179
24180 .check_atapi_dma = ns87415_check_atapi_dma,
24181 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24182 };
24183
24184 #if defined(CONFIG_SUPERIO)
24185 -static struct ata_port_operations ns87560_pata_ops = {
24186 +static const struct ata_port_operations ns87560_pata_ops = {
24187 .inherits = &ns87415_pata_ops,
24188 .sff_tf_read = ns87560_tf_read,
24189 .sff_check_status = ns87560_check_status,
24190 diff -urNp linux-2.6.32.41/drivers/ata/pata_octeon_cf.c linux-2.6.32.41/drivers/ata/pata_octeon_cf.c
24191 --- linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24192 +++ linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24193 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24194 return 0;
24195 }
24196
24197 +/* cannot be const */
24198 static struct ata_port_operations octeon_cf_ops = {
24199 .inherits = &ata_sff_port_ops,
24200 .check_atapi_dma = octeon_cf_check_atapi_dma,
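pata_octeon_cf is the deliberate exception: rather than a const qualifier, the patch adds a "cannot be const" marker above the table. The likely reason — the probe code is not part of this excerpt, so this is an inference — is that the driver writes method pointers into octeon_cf_ops at probe time, for instance to select 8-bit or 16-bit data-transfer helpers for the detected CompactFlash interface, so the object has to stay in writable memory. A minimal standalone sketch of that situation, again with invented names:

/* standalone sketch of why one table is left writable: a method pointer is
 * chosen at probe time, so the object cannot live in .rodata; names invented */
#include <stdio.h>

struct ops { void (*data_xfer)(void); };

static void xfer8(void)  { puts("8-bit PIO transfer");  }
static void xfer16(void) { puts("16-bit PIO transfer"); }

/* deliberately NOT const: probe() assigns into it */
static struct ops cf_ops;

static void probe(int is_16bit)
{
	cf_ops.data_xfer = is_16bit ? xfer16 : xfer8;
}

int main(void)
{
	probe(1);
	cf_ops.data_xfer();
	return 0;
}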
24201 diff -urNp linux-2.6.32.41/drivers/ata/pata_oldpiix.c linux-2.6.32.41/drivers/ata/pata_oldpiix.c
24202 --- linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24203 +++ linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24204 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24205 ATA_BMDMA_SHT(DRV_NAME),
24206 };
24207
24208 -static struct ata_port_operations oldpiix_pata_ops = {
24209 +static const struct ata_port_operations oldpiix_pata_ops = {
24210 .inherits = &ata_bmdma_port_ops,
24211 .qc_issue = oldpiix_qc_issue,
24212 .cable_detect = ata_cable_40wire,
24213 diff -urNp linux-2.6.32.41/drivers/ata/pata_opti.c linux-2.6.32.41/drivers/ata/pata_opti.c
24214 --- linux-2.6.32.41/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24215 +++ linux-2.6.32.41/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24216 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24217 ATA_PIO_SHT(DRV_NAME),
24218 };
24219
24220 -static struct ata_port_operations opti_port_ops = {
24221 +static const struct ata_port_operations opti_port_ops = {
24222 .inherits = &ata_sff_port_ops,
24223 .cable_detect = ata_cable_40wire,
24224 .set_piomode = opti_set_piomode,
24225 diff -urNp linux-2.6.32.41/drivers/ata/pata_optidma.c linux-2.6.32.41/drivers/ata/pata_optidma.c
24226 --- linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24227 +++ linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24228 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24229 ATA_BMDMA_SHT(DRV_NAME),
24230 };
24231
24232 -static struct ata_port_operations optidma_port_ops = {
24233 +static const struct ata_port_operations optidma_port_ops = {
24234 .inherits = &ata_bmdma_port_ops,
24235 .cable_detect = ata_cable_40wire,
24236 .set_piomode = optidma_set_pio_mode,
24237 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24238 .prereset = optidma_pre_reset,
24239 };
24240
24241 -static struct ata_port_operations optiplus_port_ops = {
24242 +static const struct ata_port_operations optiplus_port_ops = {
24243 .inherits = &optidma_port_ops,
24244 .set_piomode = optiplus_set_pio_mode,
24245 .set_dmamode = optiplus_set_dma_mode,
24246 diff -urNp linux-2.6.32.41/drivers/ata/pata_palmld.c linux-2.6.32.41/drivers/ata/pata_palmld.c
24247 --- linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24248 +++ linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24249 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24250 ATA_PIO_SHT(DRV_NAME),
24251 };
24252
24253 -static struct ata_port_operations palmld_port_ops = {
24254 +static const struct ata_port_operations palmld_port_ops = {
24255 .inherits = &ata_sff_port_ops,
24256 .sff_data_xfer = ata_sff_data_xfer_noirq,
24257 .cable_detect = ata_cable_40wire,
24258 diff -urNp linux-2.6.32.41/drivers/ata/pata_pcmcia.c linux-2.6.32.41/drivers/ata/pata_pcmcia.c
24259 --- linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24260 +++ linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24261 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24262 ATA_PIO_SHT(DRV_NAME),
24263 };
24264
24265 -static struct ata_port_operations pcmcia_port_ops = {
24266 +static const struct ata_port_operations pcmcia_port_ops = {
24267 .inherits = &ata_sff_port_ops,
24268 .sff_data_xfer = ata_sff_data_xfer_noirq,
24269 .cable_detect = ata_cable_40wire,
24270 .set_mode = pcmcia_set_mode,
24271 };
24272
24273 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24274 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24275 .inherits = &ata_sff_port_ops,
24276 .sff_data_xfer = ata_data_xfer_8bit,
24277 .cable_detect = ata_cable_40wire,
24278 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24279 unsigned long io_base, ctl_base;
24280 void __iomem *io_addr, *ctl_addr;
24281 int n_ports = 1;
24282 - struct ata_port_operations *ops = &pcmcia_port_ops;
24283 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24284
24285 info = kzalloc(sizeof(*info), GFP_KERNEL);
24286 if (info == NULL)
24287 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc2027x.c linux-2.6.32.41/drivers/ata/pata_pdc2027x.c
24288 --- linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24289 +++ linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24290 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24291 ATA_BMDMA_SHT(DRV_NAME),
24292 };
24293
24294 -static struct ata_port_operations pdc2027x_pata100_ops = {
24295 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24296 .inherits = &ata_bmdma_port_ops,
24297 .check_atapi_dma = pdc2027x_check_atapi_dma,
24298 .cable_detect = pdc2027x_cable_detect,
24299 .prereset = pdc2027x_prereset,
24300 };
24301
24302 -static struct ata_port_operations pdc2027x_pata133_ops = {
24303 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24304 .inherits = &pdc2027x_pata100_ops,
24305 .mode_filter = pdc2027x_mode_filter,
24306 .set_piomode = pdc2027x_set_piomode,
24307 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c
24308 --- linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24309 +++ linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24310 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24311 ATA_BMDMA_SHT(DRV_NAME),
24312 };
24313
24314 -static struct ata_port_operations pdc2024x_port_ops = {
24315 +static const struct ata_port_operations pdc2024x_port_ops = {
24316 .inherits = &ata_bmdma_port_ops,
24317
24318 .cable_detect = ata_cable_40wire,
24319 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24320 .sff_exec_command = pdc202xx_exec_command,
24321 };
24322
24323 -static struct ata_port_operations pdc2026x_port_ops = {
24324 +static const struct ata_port_operations pdc2026x_port_ops = {
24325 .inherits = &pdc2024x_port_ops,
24326
24327 .check_atapi_dma = pdc2026x_check_atapi_dma,
24328 diff -urNp linux-2.6.32.41/drivers/ata/pata_platform.c linux-2.6.32.41/drivers/ata/pata_platform.c
24329 --- linux-2.6.32.41/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24330 +++ linux-2.6.32.41/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24331 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24332 ATA_PIO_SHT(DRV_NAME),
24333 };
24334
24335 -static struct ata_port_operations pata_platform_port_ops = {
24336 +static const struct ata_port_operations pata_platform_port_ops = {
24337 .inherits = &ata_sff_port_ops,
24338 .sff_data_xfer = ata_sff_data_xfer_noirq,
24339 .cable_detect = ata_cable_unknown,
24340 diff -urNp linux-2.6.32.41/drivers/ata/pata_qdi.c linux-2.6.32.41/drivers/ata/pata_qdi.c
24341 --- linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24342 +++ linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24343 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24344 ATA_PIO_SHT(DRV_NAME),
24345 };
24346
24347 -static struct ata_port_operations qdi6500_port_ops = {
24348 +static const struct ata_port_operations qdi6500_port_ops = {
24349 .inherits = &ata_sff_port_ops,
24350 .qc_issue = qdi_qc_issue,
24351 .sff_data_xfer = qdi_data_xfer,
24352 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24353 .set_piomode = qdi6500_set_piomode,
24354 };
24355
24356 -static struct ata_port_operations qdi6580_port_ops = {
24357 +static const struct ata_port_operations qdi6580_port_ops = {
24358 .inherits = &qdi6500_port_ops,
24359 .set_piomode = qdi6580_set_piomode,
24360 };
24361 diff -urNp linux-2.6.32.41/drivers/ata/pata_radisys.c linux-2.6.32.41/drivers/ata/pata_radisys.c
24362 --- linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24363 +++ linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24364 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24365 ATA_BMDMA_SHT(DRV_NAME),
24366 };
24367
24368 -static struct ata_port_operations radisys_pata_ops = {
24369 +static const struct ata_port_operations radisys_pata_ops = {
24370 .inherits = &ata_bmdma_port_ops,
24371 .qc_issue = radisys_qc_issue,
24372 .cable_detect = ata_cable_unknown,
24373 diff -urNp linux-2.6.32.41/drivers/ata/pata_rb532_cf.c linux-2.6.32.41/drivers/ata/pata_rb532_cf.c
24374 --- linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24375 +++ linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24376 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24377 return IRQ_HANDLED;
24378 }
24379
24380 -static struct ata_port_operations rb532_pata_port_ops = {
24381 +static const struct ata_port_operations rb532_pata_port_ops = {
24382 .inherits = &ata_sff_port_ops,
24383 .sff_data_xfer = ata_sff_data_xfer32,
24384 };
24385 diff -urNp linux-2.6.32.41/drivers/ata/pata_rdc.c linux-2.6.32.41/drivers/ata/pata_rdc.c
24386 --- linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24387 +++ linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24388 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24389 pci_write_config_byte(dev, 0x48, udma_enable);
24390 }
24391
24392 -static struct ata_port_operations rdc_pata_ops = {
24393 +static const struct ata_port_operations rdc_pata_ops = {
24394 .inherits = &ata_bmdma32_port_ops,
24395 .cable_detect = rdc_pata_cable_detect,
24396 .set_piomode = rdc_set_piomode,
24397 diff -urNp linux-2.6.32.41/drivers/ata/pata_rz1000.c linux-2.6.32.41/drivers/ata/pata_rz1000.c
24398 --- linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24399 +++ linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24400 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24401 ATA_PIO_SHT(DRV_NAME),
24402 };
24403
24404 -static struct ata_port_operations rz1000_port_ops = {
24405 +static const struct ata_port_operations rz1000_port_ops = {
24406 .inherits = &ata_sff_port_ops,
24407 .cable_detect = ata_cable_40wire,
24408 .set_mode = rz1000_set_mode,
24409 diff -urNp linux-2.6.32.41/drivers/ata/pata_sc1200.c linux-2.6.32.41/drivers/ata/pata_sc1200.c
24410 --- linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24411 +++ linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24412 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24413 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24414 };
24415
24416 -static struct ata_port_operations sc1200_port_ops = {
24417 +static const struct ata_port_operations sc1200_port_ops = {
24418 .inherits = &ata_bmdma_port_ops,
24419 .qc_prep = ata_sff_dumb_qc_prep,
24420 .qc_issue = sc1200_qc_issue,
24421 diff -urNp linux-2.6.32.41/drivers/ata/pata_scc.c linux-2.6.32.41/drivers/ata/pata_scc.c
24422 --- linux-2.6.32.41/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24423 +++ linux-2.6.32.41/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24424 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24425 ATA_BMDMA_SHT(DRV_NAME),
24426 };
24427
24428 -static struct ata_port_operations scc_pata_ops = {
24429 +static const struct ata_port_operations scc_pata_ops = {
24430 .inherits = &ata_bmdma_port_ops,
24431
24432 .set_piomode = scc_set_piomode,
24433 diff -urNp linux-2.6.32.41/drivers/ata/pata_sch.c linux-2.6.32.41/drivers/ata/pata_sch.c
24434 --- linux-2.6.32.41/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24435 +++ linux-2.6.32.41/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24436 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24437 ATA_BMDMA_SHT(DRV_NAME),
24438 };
24439
24440 -static struct ata_port_operations sch_pata_ops = {
24441 +static const struct ata_port_operations sch_pata_ops = {
24442 .inherits = &ata_bmdma_port_ops,
24443 .cable_detect = ata_cable_unknown,
24444 .set_piomode = sch_set_piomode,
24445 diff -urNp linux-2.6.32.41/drivers/ata/pata_serverworks.c linux-2.6.32.41/drivers/ata/pata_serverworks.c
24446 --- linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24447 +++ linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24448 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24449 ATA_BMDMA_SHT(DRV_NAME),
24450 };
24451
24452 -static struct ata_port_operations serverworks_osb4_port_ops = {
24453 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24454 .inherits = &ata_bmdma_port_ops,
24455 .cable_detect = serverworks_cable_detect,
24456 .mode_filter = serverworks_osb4_filter,
24457 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24458 .set_dmamode = serverworks_set_dmamode,
24459 };
24460
24461 -static struct ata_port_operations serverworks_csb_port_ops = {
24462 +static const struct ata_port_operations serverworks_csb_port_ops = {
24463 .inherits = &serverworks_osb4_port_ops,
24464 .mode_filter = serverworks_csb_filter,
24465 };
24466 diff -urNp linux-2.6.32.41/drivers/ata/pata_sil680.c linux-2.6.32.41/drivers/ata/pata_sil680.c
24467 --- linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-03-27 14:31:47.000000000 -0400
24468 +++ linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-04-17 15:56:46.000000000 -0400
24469 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24470 ATA_BMDMA_SHT(DRV_NAME),
24471 };
24472
24473 -static struct ata_port_operations sil680_port_ops = {
24474 +static const struct ata_port_operations sil680_port_ops = {
24475 .inherits = &ata_bmdma32_port_ops,
24476 .cable_detect = sil680_cable_detect,
24477 .set_piomode = sil680_set_piomode,
24478 diff -urNp linux-2.6.32.41/drivers/ata/pata_sis.c linux-2.6.32.41/drivers/ata/pata_sis.c
24479 --- linux-2.6.32.41/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24480 +++ linux-2.6.32.41/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24481 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24482 ATA_BMDMA_SHT(DRV_NAME),
24483 };
24484
24485 -static struct ata_port_operations sis_133_for_sata_ops = {
24486 +static const struct ata_port_operations sis_133_for_sata_ops = {
24487 .inherits = &ata_bmdma_port_ops,
24488 .set_piomode = sis_133_set_piomode,
24489 .set_dmamode = sis_133_set_dmamode,
24490 .cable_detect = sis_133_cable_detect,
24491 };
24492
24493 -static struct ata_port_operations sis_base_ops = {
24494 +static const struct ata_port_operations sis_base_ops = {
24495 .inherits = &ata_bmdma_port_ops,
24496 .prereset = sis_pre_reset,
24497 };
24498
24499 -static struct ata_port_operations sis_133_ops = {
24500 +static const struct ata_port_operations sis_133_ops = {
24501 .inherits = &sis_base_ops,
24502 .set_piomode = sis_133_set_piomode,
24503 .set_dmamode = sis_133_set_dmamode,
24504 .cable_detect = sis_133_cable_detect,
24505 };
24506
24507 -static struct ata_port_operations sis_133_early_ops = {
24508 +static const struct ata_port_operations sis_133_early_ops = {
24509 .inherits = &sis_base_ops,
24510 .set_piomode = sis_100_set_piomode,
24511 .set_dmamode = sis_133_early_set_dmamode,
24512 .cable_detect = sis_66_cable_detect,
24513 };
24514
24515 -static struct ata_port_operations sis_100_ops = {
24516 +static const struct ata_port_operations sis_100_ops = {
24517 .inherits = &sis_base_ops,
24518 .set_piomode = sis_100_set_piomode,
24519 .set_dmamode = sis_100_set_dmamode,
24520 .cable_detect = sis_66_cable_detect,
24521 };
24522
24523 -static struct ata_port_operations sis_66_ops = {
24524 +static const struct ata_port_operations sis_66_ops = {
24525 .inherits = &sis_base_ops,
24526 .set_piomode = sis_old_set_piomode,
24527 .set_dmamode = sis_66_set_dmamode,
24528 .cable_detect = sis_66_cable_detect,
24529 };
24530
24531 -static struct ata_port_operations sis_old_ops = {
24532 +static const struct ata_port_operations sis_old_ops = {
24533 .inherits = &sis_base_ops,
24534 .set_piomode = sis_old_set_piomode,
24535 .set_dmamode = sis_old_set_dmamode,
24536 diff -urNp linux-2.6.32.41/drivers/ata/pata_sl82c105.c linux-2.6.32.41/drivers/ata/pata_sl82c105.c
24537 --- linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24538 +++ linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24539 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24540 ATA_BMDMA_SHT(DRV_NAME),
24541 };
24542
24543 -static struct ata_port_operations sl82c105_port_ops = {
24544 +static const struct ata_port_operations sl82c105_port_ops = {
24545 .inherits = &ata_bmdma_port_ops,
24546 .qc_defer = sl82c105_qc_defer,
24547 .bmdma_start = sl82c105_bmdma_start,
24548 diff -urNp linux-2.6.32.41/drivers/ata/pata_triflex.c linux-2.6.32.41/drivers/ata/pata_triflex.c
24549 --- linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24550 +++ linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24551 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24552 ATA_BMDMA_SHT(DRV_NAME),
24553 };
24554
24555 -static struct ata_port_operations triflex_port_ops = {
24556 +static const struct ata_port_operations triflex_port_ops = {
24557 .inherits = &ata_bmdma_port_ops,
24558 .bmdma_start = triflex_bmdma_start,
24559 .bmdma_stop = triflex_bmdma_stop,
24560 diff -urNp linux-2.6.32.41/drivers/ata/pata_via.c linux-2.6.32.41/drivers/ata/pata_via.c
24561 --- linux-2.6.32.41/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24562 +++ linux-2.6.32.41/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24563 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24564 ATA_BMDMA_SHT(DRV_NAME),
24565 };
24566
24567 -static struct ata_port_operations via_port_ops = {
24568 +static const struct ata_port_operations via_port_ops = {
24569 .inherits = &ata_bmdma_port_ops,
24570 .cable_detect = via_cable_detect,
24571 .set_piomode = via_set_piomode,
24572 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24573 .port_start = via_port_start,
24574 };
24575
24576 -static struct ata_port_operations via_port_ops_noirq = {
24577 +static const struct ata_port_operations via_port_ops_noirq = {
24578 .inherits = &via_port_ops,
24579 .sff_data_xfer = ata_sff_data_xfer_noirq,
24580 };
24581 diff -urNp linux-2.6.32.41/drivers/ata/pata_winbond.c linux-2.6.32.41/drivers/ata/pata_winbond.c
24582 --- linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24583 +++ linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24584 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24585 ATA_PIO_SHT(DRV_NAME),
24586 };
24587
24588 -static struct ata_port_operations winbond_port_ops = {
24589 +static const struct ata_port_operations winbond_port_ops = {
24590 .inherits = &ata_sff_port_ops,
24591 .sff_data_xfer = winbond_data_xfer,
24592 .cable_detect = ata_cable_40wire,
24593 diff -urNp linux-2.6.32.41/drivers/ata/pdc_adma.c linux-2.6.32.41/drivers/ata/pdc_adma.c
24594 --- linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24595 +++ linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24596 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24597 .dma_boundary = ADMA_DMA_BOUNDARY,
24598 };
24599
24600 -static struct ata_port_operations adma_ata_ops = {
24601 +static const struct ata_port_operations adma_ata_ops = {
24602 .inherits = &ata_sff_port_ops,
24603
24604 .lost_interrupt = ATA_OP_NULL,
24605 diff -urNp linux-2.6.32.41/drivers/ata/sata_fsl.c linux-2.6.32.41/drivers/ata/sata_fsl.c
24606 --- linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24607 +++ linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24608 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24609 .dma_boundary = ATA_DMA_BOUNDARY,
24610 };
24611
24612 -static struct ata_port_operations sata_fsl_ops = {
24613 +static const struct ata_port_operations sata_fsl_ops = {
24614 .inherits = &sata_pmp_port_ops,
24615
24616 .qc_defer = ata_std_qc_defer,
24617 diff -urNp linux-2.6.32.41/drivers/ata/sata_inic162x.c linux-2.6.32.41/drivers/ata/sata_inic162x.c
24618 --- linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24619 +++ linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24620 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24621 return 0;
24622 }
24623
24624 -static struct ata_port_operations inic_port_ops = {
24625 +static const struct ata_port_operations inic_port_ops = {
24626 .inherits = &sata_port_ops,
24627
24628 .check_atapi_dma = inic_check_atapi_dma,
24629 diff -urNp linux-2.6.32.41/drivers/ata/sata_mv.c linux-2.6.32.41/drivers/ata/sata_mv.c
24630 --- linux-2.6.32.41/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24631 +++ linux-2.6.32.41/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24632 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24633 .dma_boundary = MV_DMA_BOUNDARY,
24634 };
24635
24636 -static struct ata_port_operations mv5_ops = {
24637 +static const struct ata_port_operations mv5_ops = {
24638 .inherits = &ata_sff_port_ops,
24639
24640 .lost_interrupt = ATA_OP_NULL,
24641 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24642 .port_stop = mv_port_stop,
24643 };
24644
24645 -static struct ata_port_operations mv6_ops = {
24646 +static const struct ata_port_operations mv6_ops = {
24647 .inherits = &mv5_ops,
24648 .dev_config = mv6_dev_config,
24649 .scr_read = mv_scr_read,
24650 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24651 .bmdma_status = mv_bmdma_status,
24652 };
24653
24654 -static struct ata_port_operations mv_iie_ops = {
24655 +static const struct ata_port_operations mv_iie_ops = {
24656 .inherits = &mv6_ops,
24657 .dev_config = ATA_OP_NULL,
24658 .qc_prep = mv_qc_prep_iie,
24659 diff -urNp linux-2.6.32.41/drivers/ata/sata_nv.c linux-2.6.32.41/drivers/ata/sata_nv.c
24660 --- linux-2.6.32.41/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24661 +++ linux-2.6.32.41/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24662 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24663 * cases. Define nv_hardreset() which only kicks in for post-boot
24664 * probing and use it for all variants.
24665 */
24666 -static struct ata_port_operations nv_generic_ops = {
24667 +static const struct ata_port_operations nv_generic_ops = {
24668 .inherits = &ata_bmdma_port_ops,
24669 .lost_interrupt = ATA_OP_NULL,
24670 .scr_read = nv_scr_read,
24671 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24672 .hardreset = nv_hardreset,
24673 };
24674
24675 -static struct ata_port_operations nv_nf2_ops = {
24676 +static const struct ata_port_operations nv_nf2_ops = {
24677 .inherits = &nv_generic_ops,
24678 .freeze = nv_nf2_freeze,
24679 .thaw = nv_nf2_thaw,
24680 };
24681
24682 -static struct ata_port_operations nv_ck804_ops = {
24683 +static const struct ata_port_operations nv_ck804_ops = {
24684 .inherits = &nv_generic_ops,
24685 .freeze = nv_ck804_freeze,
24686 .thaw = nv_ck804_thaw,
24687 .host_stop = nv_ck804_host_stop,
24688 };
24689
24690 -static struct ata_port_operations nv_adma_ops = {
24691 +static const struct ata_port_operations nv_adma_ops = {
24692 .inherits = &nv_ck804_ops,
24693
24694 .check_atapi_dma = nv_adma_check_atapi_dma,
24695 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24696 .host_stop = nv_adma_host_stop,
24697 };
24698
24699 -static struct ata_port_operations nv_swncq_ops = {
24700 +static const struct ata_port_operations nv_swncq_ops = {
24701 .inherits = &nv_generic_ops,
24702
24703 .qc_defer = ata_std_qc_defer,
24704 diff -urNp linux-2.6.32.41/drivers/ata/sata_promise.c linux-2.6.32.41/drivers/ata/sata_promise.c
24705 --- linux-2.6.32.41/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24706 +++ linux-2.6.32.41/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24707 @@ -195,7 +195,7 @@ static const struct ata_port_operations
24708 .error_handler = pdc_error_handler,
24709 };
24710
24711 -static struct ata_port_operations pdc_sata_ops = {
24712 +static const struct ata_port_operations pdc_sata_ops = {
24713 .inherits = &pdc_common_ops,
24714 .cable_detect = pdc_sata_cable_detect,
24715 .freeze = pdc_sata_freeze,
24716 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24717
24718 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24719 and ->freeze/thaw that ignore the hotplug controls. */
24720 -static struct ata_port_operations pdc_old_sata_ops = {
24721 +static const struct ata_port_operations pdc_old_sata_ops = {
24722 .inherits = &pdc_sata_ops,
24723 .freeze = pdc_freeze,
24724 .thaw = pdc_thaw,
24725 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24726 };
24727
24728 -static struct ata_port_operations pdc_pata_ops = {
24729 +static const struct ata_port_operations pdc_pata_ops = {
24730 .inherits = &pdc_common_ops,
24731 .cable_detect = pdc_pata_cable_detect,
24732 .freeze = pdc_freeze,
24733 diff -urNp linux-2.6.32.41/drivers/ata/sata_qstor.c linux-2.6.32.41/drivers/ata/sata_qstor.c
24734 --- linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24735 +++ linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24736 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24737 .dma_boundary = QS_DMA_BOUNDARY,
24738 };
24739
24740 -static struct ata_port_operations qs_ata_ops = {
24741 +static const struct ata_port_operations qs_ata_ops = {
24742 .inherits = &ata_sff_port_ops,
24743
24744 .check_atapi_dma = qs_check_atapi_dma,
24745 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil24.c linux-2.6.32.41/drivers/ata/sata_sil24.c
24746 --- linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24747 +++ linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24748 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24749 .dma_boundary = ATA_DMA_BOUNDARY,
24750 };
24751
24752 -static struct ata_port_operations sil24_ops = {
24753 +static const struct ata_port_operations sil24_ops = {
24754 .inherits = &sata_pmp_port_ops,
24755
24756 .qc_defer = sil24_qc_defer,
24757 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil.c linux-2.6.32.41/drivers/ata/sata_sil.c
24758 --- linux-2.6.32.41/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24759 +++ linux-2.6.32.41/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24760 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24761 .sg_tablesize = ATA_MAX_PRD
24762 };
24763
24764 -static struct ata_port_operations sil_ops = {
24765 +static const struct ata_port_operations sil_ops = {
24766 .inherits = &ata_bmdma32_port_ops,
24767 .dev_config = sil_dev_config,
24768 .set_mode = sil_set_mode,
24769 diff -urNp linux-2.6.32.41/drivers/ata/sata_sis.c linux-2.6.32.41/drivers/ata/sata_sis.c
24770 --- linux-2.6.32.41/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24771 +++ linux-2.6.32.41/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24772 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24773 ATA_BMDMA_SHT(DRV_NAME),
24774 };
24775
24776 -static struct ata_port_operations sis_ops = {
24777 +static const struct ata_port_operations sis_ops = {
24778 .inherits = &ata_bmdma_port_ops,
24779 .scr_read = sis_scr_read,
24780 .scr_write = sis_scr_write,
24781 diff -urNp linux-2.6.32.41/drivers/ata/sata_svw.c linux-2.6.32.41/drivers/ata/sata_svw.c
24782 --- linux-2.6.32.41/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24783 +++ linux-2.6.32.41/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24784 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24785 };
24786
24787
24788 -static struct ata_port_operations k2_sata_ops = {
24789 +static const struct ata_port_operations k2_sata_ops = {
24790 .inherits = &ata_bmdma_port_ops,
24791 .sff_tf_load = k2_sata_tf_load,
24792 .sff_tf_read = k2_sata_tf_read,
24793 diff -urNp linux-2.6.32.41/drivers/ata/sata_sx4.c linux-2.6.32.41/drivers/ata/sata_sx4.c
24794 --- linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24795 +++ linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24796 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24797 };
24798
24799 /* TODO: inherit from base port_ops after converting to new EH */
24800 -static struct ata_port_operations pdc_20621_ops = {
24801 +static const struct ata_port_operations pdc_20621_ops = {
24802 .inherits = &ata_sff_port_ops,
24803
24804 .check_atapi_dma = pdc_check_atapi_dma,
24805 diff -urNp linux-2.6.32.41/drivers/ata/sata_uli.c linux-2.6.32.41/drivers/ata/sata_uli.c
24806 --- linux-2.6.32.41/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24807 +++ linux-2.6.32.41/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24808 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24809 ATA_BMDMA_SHT(DRV_NAME),
24810 };
24811
24812 -static struct ata_port_operations uli_ops = {
24813 +static const struct ata_port_operations uli_ops = {
24814 .inherits = &ata_bmdma_port_ops,
24815 .scr_read = uli_scr_read,
24816 .scr_write = uli_scr_write,
24817 diff -urNp linux-2.6.32.41/drivers/ata/sata_via.c linux-2.6.32.41/drivers/ata/sata_via.c
24818 --- linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24819 +++ linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24820 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24821 ATA_BMDMA_SHT(DRV_NAME),
24822 };
24823
24824 -static struct ata_port_operations svia_base_ops = {
24825 +static const struct ata_port_operations svia_base_ops = {
24826 .inherits = &ata_bmdma_port_ops,
24827 .sff_tf_load = svia_tf_load,
24828 };
24829
24830 -static struct ata_port_operations vt6420_sata_ops = {
24831 +static const struct ata_port_operations vt6420_sata_ops = {
24832 .inherits = &svia_base_ops,
24833 .freeze = svia_noop_freeze,
24834 .prereset = vt6420_prereset,
24835 .bmdma_start = vt6420_bmdma_start,
24836 };
24837
24838 -static struct ata_port_operations vt6421_pata_ops = {
24839 +static const struct ata_port_operations vt6421_pata_ops = {
24840 .inherits = &svia_base_ops,
24841 .cable_detect = vt6421_pata_cable_detect,
24842 .set_piomode = vt6421_set_pio_mode,
24843 .set_dmamode = vt6421_set_dma_mode,
24844 };
24845
24846 -static struct ata_port_operations vt6421_sata_ops = {
24847 +static const struct ata_port_operations vt6421_sata_ops = {
24848 .inherits = &svia_base_ops,
24849 .scr_read = svia_scr_read,
24850 .scr_write = svia_scr_write,
24851 };
24852
24853 -static struct ata_port_operations vt8251_ops = {
24854 +static const struct ata_port_operations vt8251_ops = {
24855 .inherits = &svia_base_ops,
24856 .hardreset = sata_std_hardreset,
24857 .scr_read = vt8251_scr_read,
24858 diff -urNp linux-2.6.32.41/drivers/ata/sata_vsc.c linux-2.6.32.41/drivers/ata/sata_vsc.c
24859 --- linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
24860 +++ linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
24861 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
24862 };
24863
24864
24865 -static struct ata_port_operations vsc_sata_ops = {
24866 +static const struct ata_port_operations vsc_sata_ops = {
24867 .inherits = &ata_bmdma_port_ops,
24868 /* The IRQ handling is not quite standard SFF behaviour so we
24869 cannot use the default lost interrupt handler */
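From here the hunks leave libata and apply the patch's other recurring transformation to the ATM drivers: the per-VCC statistics counters (tx, tx_err, rx, rx_drop) are bumped with atomic_inc_unchecked() instead of atomic_inc(). Under PaX's REFCOUNT hardening, ordinary atomic_t increments are checked for overflow, since a wrapped reference count is a classic route to use-after-free; counters that are pure statistics, where wraparound is harmless, are retyped as atomic_unchecked_t (that type change to the stats structures lives elsewhere in the patch, outside this excerpt) so they do not trip the overflow detector. The sketch below is a single-threaded, user-space approximation of the intended split, not the PaX implementation, which does its checking inside the per-architecture atomic assembly:

/* single-threaded, non-atomic stand-in for the checked vs. unchecked split;
 * only meant to illustrate the intended difference in behaviour */
#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;            /* overflow-checked   */
typedef struct { int counter; } atomic_unchecked_t;  /* plain statistics   */

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		/* a wrapped reference count is a bug worth trapping on;
		 * PaX reports it and refuses to wrap */
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;   /* nothing is ever freed based on this value */
}

int main(void)
{
	atomic_t refs = { INT_MAX };          /* simulate a refcount about to wrap */
	atomic_unchecked_t tx = { 0 };        /* a per-VCC tx statistics counter   */

	atomic_inc(&refs);                    /* overflow caught                   */
	atomic_inc_unchecked(&tx);            /* just counts                       */
	printf("refs=%d tx=%d\n", refs.counter, tx.counter);
	return 0;
}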
24870 diff -urNp linux-2.6.32.41/drivers/atm/adummy.c linux-2.6.32.41/drivers/atm/adummy.c
24871 --- linux-2.6.32.41/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
24872 +++ linux-2.6.32.41/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
24873 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
24874 vcc->pop(vcc, skb);
24875 else
24876 dev_kfree_skb_any(skb);
24877 - atomic_inc(&vcc->stats->tx);
24878 + atomic_inc_unchecked(&vcc->stats->tx);
24879
24880 return 0;
24881 }
24882 diff -urNp linux-2.6.32.41/drivers/atm/ambassador.c linux-2.6.32.41/drivers/atm/ambassador.c
24883 --- linux-2.6.32.41/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
24884 +++ linux-2.6.32.41/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
24885 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
24886 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
24887
24888 // VC layer stats
24889 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24890 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24891
24892 // free the descriptor
24893 kfree (tx_descr);
24894 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
24895 dump_skb ("<<<", vc, skb);
24896
24897 // VC layer stats
24898 - atomic_inc(&atm_vcc->stats->rx);
24899 + atomic_inc_unchecked(&atm_vcc->stats->rx);
24900 __net_timestamp(skb);
24901 // end of our responsability
24902 atm_vcc->push (atm_vcc, skb);
24903 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
24904 } else {
24905 PRINTK (KERN_INFO, "dropped over-size frame");
24906 // should we count this?
24907 - atomic_inc(&atm_vcc->stats->rx_drop);
24908 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24909 }
24910
24911 } else {
24912 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
24913 }
24914
24915 if (check_area (skb->data, skb->len)) {
24916 - atomic_inc(&atm_vcc->stats->tx_err);
24917 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
24918 return -ENOMEM; // ?
24919 }
24920
24921 diff -urNp linux-2.6.32.41/drivers/atm/atmtcp.c linux-2.6.32.41/drivers/atm/atmtcp.c
24922 --- linux-2.6.32.41/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
24923 +++ linux-2.6.32.41/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
24924 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
24925 if (vcc->pop) vcc->pop(vcc,skb);
24926 else dev_kfree_skb(skb);
24927 if (dev_data) return 0;
24928 - atomic_inc(&vcc->stats->tx_err);
24929 + atomic_inc_unchecked(&vcc->stats->tx_err);
24930 return -ENOLINK;
24931 }
24932 size = skb->len+sizeof(struct atmtcp_hdr);
24933 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
24934 if (!new_skb) {
24935 if (vcc->pop) vcc->pop(vcc,skb);
24936 else dev_kfree_skb(skb);
24937 - atomic_inc(&vcc->stats->tx_err);
24938 + atomic_inc_unchecked(&vcc->stats->tx_err);
24939 return -ENOBUFS;
24940 }
24941 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
24942 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
24943 if (vcc->pop) vcc->pop(vcc,skb);
24944 else dev_kfree_skb(skb);
24945 out_vcc->push(out_vcc,new_skb);
24946 - atomic_inc(&vcc->stats->tx);
24947 - atomic_inc(&out_vcc->stats->rx);
24948 + atomic_inc_unchecked(&vcc->stats->tx);
24949 + atomic_inc_unchecked(&out_vcc->stats->rx);
24950 return 0;
24951 }
24952
24953 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
24954 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
24955 read_unlock(&vcc_sklist_lock);
24956 if (!out_vcc) {
24957 - atomic_inc(&vcc->stats->tx_err);
24958 + atomic_inc_unchecked(&vcc->stats->tx_err);
24959 goto done;
24960 }
24961 skb_pull(skb,sizeof(struct atmtcp_hdr));
24962 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
24963 __net_timestamp(new_skb);
24964 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
24965 out_vcc->push(out_vcc,new_skb);
24966 - atomic_inc(&vcc->stats->tx);
24967 - atomic_inc(&out_vcc->stats->rx);
24968 + atomic_inc_unchecked(&vcc->stats->tx);
24969 + atomic_inc_unchecked(&out_vcc->stats->rx);
24970 done:
24971 if (vcc->pop) vcc->pop(vcc,skb);
24972 else dev_kfree_skb(skb);
24973 diff -urNp linux-2.6.32.41/drivers/atm/eni.c linux-2.6.32.41/drivers/atm/eni.c
24974 --- linux-2.6.32.41/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
24975 +++ linux-2.6.32.41/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
24976 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
24977 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
24978 vcc->dev->number);
24979 length = 0;
24980 - atomic_inc(&vcc->stats->rx_err);
24981 + atomic_inc_unchecked(&vcc->stats->rx_err);
24982 }
24983 else {
24984 length = ATM_CELL_SIZE-1; /* no HEC */
24985 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24986 size);
24987 }
24988 eff = length = 0;
24989 - atomic_inc(&vcc->stats->rx_err);
24990 + atomic_inc_unchecked(&vcc->stats->rx_err);
24991 }
24992 else {
24993 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
24994 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24995 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
24996 vcc->dev->number,vcc->vci,length,size << 2,descr);
24997 length = eff = 0;
24998 - atomic_inc(&vcc->stats->rx_err);
24999 + atomic_inc_unchecked(&vcc->stats->rx_err);
25000 }
25001 }
25002 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25003 @@ -770,7 +770,7 @@ rx_dequeued++;
25004 vcc->push(vcc,skb);
25005 pushed++;
25006 }
25007 - atomic_inc(&vcc->stats->rx);
25008 + atomic_inc_unchecked(&vcc->stats->rx);
25009 }
25010 wake_up(&eni_dev->rx_wait);
25011 }
25012 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25013 PCI_DMA_TODEVICE);
25014 if (vcc->pop) vcc->pop(vcc,skb);
25015 else dev_kfree_skb_irq(skb);
25016 - atomic_inc(&vcc->stats->tx);
25017 + atomic_inc_unchecked(&vcc->stats->tx);
25018 wake_up(&eni_dev->tx_wait);
25019 dma_complete++;
25020 }
25021 diff -urNp linux-2.6.32.41/drivers/atm/firestream.c linux-2.6.32.41/drivers/atm/firestream.c
25022 --- linux-2.6.32.41/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25023 +++ linux-2.6.32.41/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25024 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25025 }
25026 }
25027
25028 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25029 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25030
25031 fs_dprintk (FS_DEBUG_TXMEM, "i");
25032 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25033 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25034 #endif
25035 skb_put (skb, qe->p1 & 0xffff);
25036 ATM_SKB(skb)->vcc = atm_vcc;
25037 - atomic_inc(&atm_vcc->stats->rx);
25038 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25039 __net_timestamp(skb);
25040 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25041 atm_vcc->push (atm_vcc, skb);
25042 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25043 kfree (pe);
25044 }
25045 if (atm_vcc)
25046 - atomic_inc(&atm_vcc->stats->rx_drop);
25047 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25048 break;
25049 case 0x1f: /* Reassembly abort: no buffers. */
25050 /* Silently increment error counter. */
25051 if (atm_vcc)
25052 - atomic_inc(&atm_vcc->stats->rx_drop);
25053 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25054 break;
25055 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25056 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25057 diff -urNp linux-2.6.32.41/drivers/atm/fore200e.c linux-2.6.32.41/drivers/atm/fore200e.c
25058 --- linux-2.6.32.41/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25059 +++ linux-2.6.32.41/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25060 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25061 #endif
25062 /* check error condition */
25063 if (*entry->status & STATUS_ERROR)
25064 - atomic_inc(&vcc->stats->tx_err);
25065 + atomic_inc_unchecked(&vcc->stats->tx_err);
25066 else
25067 - atomic_inc(&vcc->stats->tx);
25068 + atomic_inc_unchecked(&vcc->stats->tx);
25069 }
25070 }
25071
25072 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25073 if (skb == NULL) {
25074 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25075
25076 - atomic_inc(&vcc->stats->rx_drop);
25077 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25078 return -ENOMEM;
25079 }
25080
25081 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25082
25083 dev_kfree_skb_any(skb);
25084
25085 - atomic_inc(&vcc->stats->rx_drop);
25086 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25087 return -ENOMEM;
25088 }
25089
25090 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25091
25092 vcc->push(vcc, skb);
25093 - atomic_inc(&vcc->stats->rx);
25094 + atomic_inc_unchecked(&vcc->stats->rx);
25095
25096 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25097
25098 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25099 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25100 fore200e->atm_dev->number,
25101 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25102 - atomic_inc(&vcc->stats->rx_err);
25103 + atomic_inc_unchecked(&vcc->stats->rx_err);
25104 }
25105 }
25106
25107 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25108 goto retry_here;
25109 }
25110
25111 - atomic_inc(&vcc->stats->tx_err);
25112 + atomic_inc_unchecked(&vcc->stats->tx_err);
25113
25114 fore200e->tx_sat++;
25115 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25116 diff -urNp linux-2.6.32.41/drivers/atm/he.c linux-2.6.32.41/drivers/atm/he.c
25117 --- linux-2.6.32.41/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25118 +++ linux-2.6.32.41/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25119 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25120
25121 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25122 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25123 - atomic_inc(&vcc->stats->rx_drop);
25124 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25125 goto return_host_buffers;
25126 }
25127
25128 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25129 RBRQ_LEN_ERR(he_dev->rbrq_head)
25130 ? "LEN_ERR" : "",
25131 vcc->vpi, vcc->vci);
25132 - atomic_inc(&vcc->stats->rx_err);
25133 + atomic_inc_unchecked(&vcc->stats->rx_err);
25134 goto return_host_buffers;
25135 }
25136
25137 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25138 vcc->push(vcc, skb);
25139 spin_lock(&he_dev->global_lock);
25140
25141 - atomic_inc(&vcc->stats->rx);
25142 + atomic_inc_unchecked(&vcc->stats->rx);
25143
25144 return_host_buffers:
25145 ++pdus_assembled;
25146 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25147 tpd->vcc->pop(tpd->vcc, tpd->skb);
25148 else
25149 dev_kfree_skb_any(tpd->skb);
25150 - atomic_inc(&tpd->vcc->stats->tx_err);
25151 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25152 }
25153 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25154 return;
25155 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25156 vcc->pop(vcc, skb);
25157 else
25158 dev_kfree_skb_any(skb);
25159 - atomic_inc(&vcc->stats->tx_err);
25160 + atomic_inc_unchecked(&vcc->stats->tx_err);
25161 return -EINVAL;
25162 }
25163
25164 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25165 vcc->pop(vcc, skb);
25166 else
25167 dev_kfree_skb_any(skb);
25168 - atomic_inc(&vcc->stats->tx_err);
25169 + atomic_inc_unchecked(&vcc->stats->tx_err);
25170 return -EINVAL;
25171 }
25172 #endif
25173 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25174 vcc->pop(vcc, skb);
25175 else
25176 dev_kfree_skb_any(skb);
25177 - atomic_inc(&vcc->stats->tx_err);
25178 + atomic_inc_unchecked(&vcc->stats->tx_err);
25179 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25180 return -ENOMEM;
25181 }
25182 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25183 vcc->pop(vcc, skb);
25184 else
25185 dev_kfree_skb_any(skb);
25186 - atomic_inc(&vcc->stats->tx_err);
25187 + atomic_inc_unchecked(&vcc->stats->tx_err);
25188 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25189 return -ENOMEM;
25190 }
25191 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25192 __enqueue_tpd(he_dev, tpd, cid);
25193 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25194
25195 - atomic_inc(&vcc->stats->tx);
25196 + atomic_inc_unchecked(&vcc->stats->tx);
25197
25198 return 0;
25199 }
25200 diff -urNp linux-2.6.32.41/drivers/atm/horizon.c linux-2.6.32.41/drivers/atm/horizon.c
25201 --- linux-2.6.32.41/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25202 +++ linux-2.6.32.41/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25203 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25204 {
25205 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25206 // VC layer stats
25207 - atomic_inc(&vcc->stats->rx);
25208 + atomic_inc_unchecked(&vcc->stats->rx);
25209 __net_timestamp(skb);
25210 // end of our responsability
25211 vcc->push (vcc, skb);
25212 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25213 dev->tx_iovec = NULL;
25214
25215 // VC layer stats
25216 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25217 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25218
25219 // free the skb
25220 hrz_kfree_skb (skb);
25221 diff -urNp linux-2.6.32.41/drivers/atm/idt77252.c linux-2.6.32.41/drivers/atm/idt77252.c
25222 --- linux-2.6.32.41/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25223 +++ linux-2.6.32.41/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25224 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25225 else
25226 dev_kfree_skb(skb);
25227
25228 - atomic_inc(&vcc->stats->tx);
25229 + atomic_inc_unchecked(&vcc->stats->tx);
25230 }
25231
25232 atomic_dec(&scq->used);
25233 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25234 if ((sb = dev_alloc_skb(64)) == NULL) {
25235 printk("%s: Can't allocate buffers for aal0.\n",
25236 card->name);
25237 - atomic_add(i, &vcc->stats->rx_drop);
25238 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25239 break;
25240 }
25241 if (!atm_charge(vcc, sb->truesize)) {
25242 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25243 card->name);
25244 - atomic_add(i - 1, &vcc->stats->rx_drop);
25245 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25246 dev_kfree_skb(sb);
25247 break;
25248 }
25249 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25250 ATM_SKB(sb)->vcc = vcc;
25251 __net_timestamp(sb);
25252 vcc->push(vcc, sb);
25253 - atomic_inc(&vcc->stats->rx);
25254 + atomic_inc_unchecked(&vcc->stats->rx);
25255
25256 cell += ATM_CELL_PAYLOAD;
25257 }
25258 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25259 "(CDC: %08x)\n",
25260 card->name, len, rpp->len, readl(SAR_REG_CDC));
25261 recycle_rx_pool_skb(card, rpp);
25262 - atomic_inc(&vcc->stats->rx_err);
25263 + atomic_inc_unchecked(&vcc->stats->rx_err);
25264 return;
25265 }
25266 if (stat & SAR_RSQE_CRC) {
25267 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25268 recycle_rx_pool_skb(card, rpp);
25269 - atomic_inc(&vcc->stats->rx_err);
25270 + atomic_inc_unchecked(&vcc->stats->rx_err);
25271 return;
25272 }
25273 if (skb_queue_len(&rpp->queue) > 1) {
25274 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25275 RXPRINTK("%s: Can't alloc RX skb.\n",
25276 card->name);
25277 recycle_rx_pool_skb(card, rpp);
25278 - atomic_inc(&vcc->stats->rx_err);
25279 + atomic_inc_unchecked(&vcc->stats->rx_err);
25280 return;
25281 }
25282 if (!atm_charge(vcc, skb->truesize)) {
25283 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25284 __net_timestamp(skb);
25285
25286 vcc->push(vcc, skb);
25287 - atomic_inc(&vcc->stats->rx);
25288 + atomic_inc_unchecked(&vcc->stats->rx);
25289
25290 return;
25291 }
25292 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25293 __net_timestamp(skb);
25294
25295 vcc->push(vcc, skb);
25296 - atomic_inc(&vcc->stats->rx);
25297 + atomic_inc_unchecked(&vcc->stats->rx);
25298
25299 if (skb->truesize > SAR_FB_SIZE_3)
25300 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25301 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25302 if (vcc->qos.aal != ATM_AAL0) {
25303 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25304 card->name, vpi, vci);
25305 - atomic_inc(&vcc->stats->rx_drop);
25306 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25307 goto drop;
25308 }
25309
25310 if ((sb = dev_alloc_skb(64)) == NULL) {
25311 printk("%s: Can't allocate buffers for AAL0.\n",
25312 card->name);
25313 - atomic_inc(&vcc->stats->rx_err);
25314 + atomic_inc_unchecked(&vcc->stats->rx_err);
25315 goto drop;
25316 }
25317
25318 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25319 ATM_SKB(sb)->vcc = vcc;
25320 __net_timestamp(sb);
25321 vcc->push(vcc, sb);
25322 - atomic_inc(&vcc->stats->rx);
25323 + atomic_inc_unchecked(&vcc->stats->rx);
25324
25325 drop:
25326 skb_pull(queue, 64);
25327 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25328
25329 if (vc == NULL) {
25330 printk("%s: NULL connection in send().\n", card->name);
25331 - atomic_inc(&vcc->stats->tx_err);
25332 + atomic_inc_unchecked(&vcc->stats->tx_err);
25333 dev_kfree_skb(skb);
25334 return -EINVAL;
25335 }
25336 if (!test_bit(VCF_TX, &vc->flags)) {
25337 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25338 - atomic_inc(&vcc->stats->tx_err);
25339 + atomic_inc_unchecked(&vcc->stats->tx_err);
25340 dev_kfree_skb(skb);
25341 return -EINVAL;
25342 }
25343 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25344 break;
25345 default:
25346 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25347 - atomic_inc(&vcc->stats->tx_err);
25348 + atomic_inc_unchecked(&vcc->stats->tx_err);
25349 dev_kfree_skb(skb);
25350 return -EINVAL;
25351 }
25352
25353 if (skb_shinfo(skb)->nr_frags != 0) {
25354 printk("%s: No scatter-gather yet.\n", card->name);
25355 - atomic_inc(&vcc->stats->tx_err);
25356 + atomic_inc_unchecked(&vcc->stats->tx_err);
25357 dev_kfree_skb(skb);
25358 return -EINVAL;
25359 }
25360 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25361
25362 err = queue_skb(card, vc, skb, oam);
25363 if (err) {
25364 - atomic_inc(&vcc->stats->tx_err);
25365 + atomic_inc_unchecked(&vcc->stats->tx_err);
25366 dev_kfree_skb(skb);
25367 return err;
25368 }
25369 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25370 skb = dev_alloc_skb(64);
25371 if (!skb) {
25372 printk("%s: Out of memory in send_oam().\n", card->name);
25373 - atomic_inc(&vcc->stats->tx_err);
25374 + atomic_inc_unchecked(&vcc->stats->tx_err);
25375 return -ENOMEM;
25376 }
25377 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25378 diff -urNp linux-2.6.32.41/drivers/atm/iphase.c linux-2.6.32.41/drivers/atm/iphase.c
25379 --- linux-2.6.32.41/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25380 +++ linux-2.6.32.41/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25381 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25382 status = (u_short) (buf_desc_ptr->desc_mode);
25383 if (status & (RX_CER | RX_PTE | RX_OFL))
25384 {
25385 - atomic_inc(&vcc->stats->rx_err);
25386 + atomic_inc_unchecked(&vcc->stats->rx_err);
25387 IF_ERR(printk("IA: bad packet, dropping it");)
25388 if (status & RX_CER) {
25389 IF_ERR(printk(" cause: packet CRC error\n");)
25390 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25391 len = dma_addr - buf_addr;
25392 if (len > iadev->rx_buf_sz) {
25393 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25394 - atomic_inc(&vcc->stats->rx_err);
25395 + atomic_inc_unchecked(&vcc->stats->rx_err);
25396 goto out_free_desc;
25397 }
25398
25399 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25400 ia_vcc = INPH_IA_VCC(vcc);
25401 if (ia_vcc == NULL)
25402 {
25403 - atomic_inc(&vcc->stats->rx_err);
25404 + atomic_inc_unchecked(&vcc->stats->rx_err);
25405 dev_kfree_skb_any(skb);
25406 atm_return(vcc, atm_guess_pdu2truesize(len));
25407 goto INCR_DLE;
25408 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25409 if ((length > iadev->rx_buf_sz) || (length >
25410 (skb->len - sizeof(struct cpcs_trailer))))
25411 {
25412 - atomic_inc(&vcc->stats->rx_err);
25413 + atomic_inc_unchecked(&vcc->stats->rx_err);
25414 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25415 length, skb->len);)
25416 dev_kfree_skb_any(skb);
25417 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25418
25419 IF_RX(printk("rx_dle_intr: skb push");)
25420 vcc->push(vcc,skb);
25421 - atomic_inc(&vcc->stats->rx);
25422 + atomic_inc_unchecked(&vcc->stats->rx);
25423 iadev->rx_pkt_cnt++;
25424 }
25425 INCR_DLE:
25426 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25427 {
25428 struct k_sonet_stats *stats;
25429 stats = &PRIV(_ia_dev[board])->sonet_stats;
25430 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25431 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25432 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25433 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25434 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25435 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25436 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25437 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25438 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25439 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25440 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25441 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25442 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25443 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25444 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25445 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25446 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25447 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25448 }
25449 ia_cmds.status = 0;
25450 break;
25451 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25452 if ((desc == 0) || (desc > iadev->num_tx_desc))
25453 {
25454 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25455 - atomic_inc(&vcc->stats->tx);
25456 + atomic_inc_unchecked(&vcc->stats->tx);
25457 if (vcc->pop)
25458 vcc->pop(vcc, skb);
25459 else
25460 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25461 ATM_DESC(skb) = vcc->vci;
25462 skb_queue_tail(&iadev->tx_dma_q, skb);
25463
25464 - atomic_inc(&vcc->stats->tx);
25465 + atomic_inc_unchecked(&vcc->stats->tx);
25466 iadev->tx_pkt_cnt++;
25467 /* Increment transaction counter */
25468 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25469
25470 #if 0
25471 /* add flow control logic */
25472 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25473 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25474 if (iavcc->vc_desc_cnt > 10) {
25475 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25476 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25477 diff -urNp linux-2.6.32.41/drivers/atm/lanai.c linux-2.6.32.41/drivers/atm/lanai.c
25478 --- linux-2.6.32.41/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25479 +++ linux-2.6.32.41/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25480 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25481 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25482 lanai_endtx(lanai, lvcc);
25483 lanai_free_skb(lvcc->tx.atmvcc, skb);
25484 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25485 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25486 }
25487
25488 /* Try to fill the buffer - don't call unless there is backlog */
25489 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25490 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25491 __net_timestamp(skb);
25492 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25493 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25494 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25495 out:
25496 lvcc->rx.buf.ptr = end;
25497 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25498 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25499 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25500 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25501 lanai->stats.service_rxnotaal5++;
25502 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25503 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25504 return 0;
25505 }
25506 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25507 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25508 int bytes;
25509 read_unlock(&vcc_sklist_lock);
25510 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25511 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25512 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25513 lvcc->stats.x.aal5.service_trash++;
25514 bytes = (SERVICE_GET_END(s) * 16) -
25515 (((unsigned long) lvcc->rx.buf.ptr) -
25516 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25517 }
25518 if (s & SERVICE_STREAM) {
25519 read_unlock(&vcc_sklist_lock);
25520 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25521 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25522 lvcc->stats.x.aal5.service_stream++;
25523 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25524 "PDU on VCI %d!\n", lanai->number, vci);
25525 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25526 return 0;
25527 }
25528 DPRINTK("got rx crc error on vci %d\n", vci);
25529 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25530 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25531 lvcc->stats.x.aal5.service_rxcrc++;
25532 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25533 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25534 diff -urNp linux-2.6.32.41/drivers/atm/nicstar.c linux-2.6.32.41/drivers/atm/nicstar.c
25535 --- linux-2.6.32.41/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25536 +++ linux-2.6.32.41/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25537 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25538 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25539 {
25540 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25541 - atomic_inc(&vcc->stats->tx_err);
25542 + atomic_inc_unchecked(&vcc->stats->tx_err);
25543 dev_kfree_skb_any(skb);
25544 return -EINVAL;
25545 }
25546 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25547 if (!vc->tx)
25548 {
25549 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25550 - atomic_inc(&vcc->stats->tx_err);
25551 + atomic_inc_unchecked(&vcc->stats->tx_err);
25552 dev_kfree_skb_any(skb);
25553 return -EINVAL;
25554 }
25555 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25556 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25557 {
25558 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25559 - atomic_inc(&vcc->stats->tx_err);
25560 + atomic_inc_unchecked(&vcc->stats->tx_err);
25561 dev_kfree_skb_any(skb);
25562 return -EINVAL;
25563 }
25564 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25565 if (skb_shinfo(skb)->nr_frags != 0)
25566 {
25567 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25568 - atomic_inc(&vcc->stats->tx_err);
25569 + atomic_inc_unchecked(&vcc->stats->tx_err);
25570 dev_kfree_skb_any(skb);
25571 return -EINVAL;
25572 }
25573 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25574
25575 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25576 {
25577 - atomic_inc(&vcc->stats->tx_err);
25578 + atomic_inc_unchecked(&vcc->stats->tx_err);
25579 dev_kfree_skb_any(skb);
25580 return -EIO;
25581 }
25582 - atomic_inc(&vcc->stats->tx);
25583 + atomic_inc_unchecked(&vcc->stats->tx);
25584
25585 return 0;
25586 }
25587 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25588 {
25589 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25590 card->index);
25591 - atomic_add(i,&vcc->stats->rx_drop);
25592 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25593 break;
25594 }
25595 if (!atm_charge(vcc, sb->truesize))
25596 {
25597 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25598 card->index);
25599 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25600 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25601 dev_kfree_skb_any(sb);
25602 break;
25603 }
25604 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25605 ATM_SKB(sb)->vcc = vcc;
25606 __net_timestamp(sb);
25607 vcc->push(vcc, sb);
25608 - atomic_inc(&vcc->stats->rx);
25609 + atomic_inc_unchecked(&vcc->stats->rx);
25610 cell += ATM_CELL_PAYLOAD;
25611 }
25612
25613 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25614 if (iovb == NULL)
25615 {
25616 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25617 - atomic_inc(&vcc->stats->rx_drop);
25618 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25619 recycle_rx_buf(card, skb);
25620 return;
25621 }
25622 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25623 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25624 {
25625 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25626 - atomic_inc(&vcc->stats->rx_err);
25627 + atomic_inc_unchecked(&vcc->stats->rx_err);
25628 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25629 NS_SKB(iovb)->iovcnt = 0;
25630 iovb->len = 0;
25631 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25632 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25633 card->index);
25634 which_list(card, skb);
25635 - atomic_inc(&vcc->stats->rx_err);
25636 + atomic_inc_unchecked(&vcc->stats->rx_err);
25637 recycle_rx_buf(card, skb);
25638 vc->rx_iov = NULL;
25639 recycle_iov_buf(card, iovb);
25640 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25641 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25642 card->index);
25643 which_list(card, skb);
25644 - atomic_inc(&vcc->stats->rx_err);
25645 + atomic_inc_unchecked(&vcc->stats->rx_err);
25646 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25647 NS_SKB(iovb)->iovcnt);
25648 vc->rx_iov = NULL;
25649 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25650 printk(" - PDU size mismatch.\n");
25651 else
25652 printk(".\n");
25653 - atomic_inc(&vcc->stats->rx_err);
25654 + atomic_inc_unchecked(&vcc->stats->rx_err);
25655 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25656 NS_SKB(iovb)->iovcnt);
25657 vc->rx_iov = NULL;
25658 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25659 if (!atm_charge(vcc, skb->truesize))
25660 {
25661 push_rxbufs(card, skb);
25662 - atomic_inc(&vcc->stats->rx_drop);
25663 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25664 }
25665 else
25666 {
25667 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25668 ATM_SKB(skb)->vcc = vcc;
25669 __net_timestamp(skb);
25670 vcc->push(vcc, skb);
25671 - atomic_inc(&vcc->stats->rx);
25672 + atomic_inc_unchecked(&vcc->stats->rx);
25673 }
25674 }
25675 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25676 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25677 if (!atm_charge(vcc, sb->truesize))
25678 {
25679 push_rxbufs(card, sb);
25680 - atomic_inc(&vcc->stats->rx_drop);
25681 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25682 }
25683 else
25684 {
25685 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25686 ATM_SKB(sb)->vcc = vcc;
25687 __net_timestamp(sb);
25688 vcc->push(vcc, sb);
25689 - atomic_inc(&vcc->stats->rx);
25690 + atomic_inc_unchecked(&vcc->stats->rx);
25691 }
25692
25693 push_rxbufs(card, skb);
25694 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25695 if (!atm_charge(vcc, skb->truesize))
25696 {
25697 push_rxbufs(card, skb);
25698 - atomic_inc(&vcc->stats->rx_drop);
25699 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25700 }
25701 else
25702 {
25703 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25704 ATM_SKB(skb)->vcc = vcc;
25705 __net_timestamp(skb);
25706 vcc->push(vcc, skb);
25707 - atomic_inc(&vcc->stats->rx);
25708 + atomic_inc_unchecked(&vcc->stats->rx);
25709 }
25710
25711 push_rxbufs(card, sb);
25712 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25713 if (hb == NULL)
25714 {
25715 printk("nicstar%d: Out of huge buffers.\n", card->index);
25716 - atomic_inc(&vcc->stats->rx_drop);
25717 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25718 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25719 NS_SKB(iovb)->iovcnt);
25720 vc->rx_iov = NULL;
25721 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25722 }
25723 else
25724 dev_kfree_skb_any(hb);
25725 - atomic_inc(&vcc->stats->rx_drop);
25726 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25727 }
25728 else
25729 {
25730 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25731 #endif /* NS_USE_DESTRUCTORS */
25732 __net_timestamp(hb);
25733 vcc->push(vcc, hb);
25734 - atomic_inc(&vcc->stats->rx);
25735 + atomic_inc_unchecked(&vcc->stats->rx);
25736 }
25737 }
25738
25739 diff -urNp linux-2.6.32.41/drivers/atm/solos-pci.c linux-2.6.32.41/drivers/atm/solos-pci.c
25740 --- linux-2.6.32.41/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25741 +++ linux-2.6.32.41/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25742 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25743 }
25744 atm_charge(vcc, skb->truesize);
25745 vcc->push(vcc, skb);
25746 - atomic_inc(&vcc->stats->rx);
25747 + atomic_inc_unchecked(&vcc->stats->rx);
25748 break;
25749
25750 case PKT_STATUS:
25751 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25752 char msg[500];
25753 char item[10];
25754
25755 + pax_track_stack();
25756 +
25757 len = buf->len;
25758 for (i = 0; i < len; i++){
25759 if(i % 8 == 0)
25760 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25761 vcc = SKB_CB(oldskb)->vcc;
25762
25763 if (vcc) {
25764 - atomic_inc(&vcc->stats->tx);
25765 + atomic_inc_unchecked(&vcc->stats->tx);
25766 solos_pop(vcc, oldskb);
25767 } else
25768 dev_kfree_skb_irq(oldskb);
25769 diff -urNp linux-2.6.32.41/drivers/atm/suni.c linux-2.6.32.41/drivers/atm/suni.c
25770 --- linux-2.6.32.41/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25771 +++ linux-2.6.32.41/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25772 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25773
25774
25775 #define ADD_LIMITED(s,v) \
25776 - atomic_add((v),&stats->s); \
25777 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25778 + atomic_add_unchecked((v),&stats->s); \
25779 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25780
25781
25782 static void suni_hz(unsigned long from_timer)
25783 diff -urNp linux-2.6.32.41/drivers/atm/uPD98402.c linux-2.6.32.41/drivers/atm/uPD98402.c
25784 --- linux-2.6.32.41/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25785 +++ linux-2.6.32.41/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25786 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25787 struct sonet_stats tmp;
25788 int error = 0;
25789
25790 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25791 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25792 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25793 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25794 if (zero && !error) {
25795 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25796
25797
25798 #define ADD_LIMITED(s,v) \
25799 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25800 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25801 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25802 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25803 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25804 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25805
25806
25807 static void stat_event(struct atm_dev *dev)
25808 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25809 if (reason & uPD98402_INT_PFM) stat_event(dev);
25810 if (reason & uPD98402_INT_PCO) {
25811 (void) GET(PCOCR); /* clear interrupt cause */
25812 - atomic_add(GET(HECCT),
25813 + atomic_add_unchecked(GET(HECCT),
25814 &PRIV(dev)->sonet_stats.uncorr_hcs);
25815 }
25816 if ((reason & uPD98402_INT_RFO) &&
25817 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25818 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25819 uPD98402_INT_LOS),PIMR); /* enable them */
25820 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25821 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25822 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25823 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25824 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25825 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25826 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25827 return 0;
25828 }
25829
25830 diff -urNp linux-2.6.32.41/drivers/atm/zatm.c linux-2.6.32.41/drivers/atm/zatm.c
25831 --- linux-2.6.32.41/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25832 +++ linux-2.6.32.41/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25833 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25834 }
25835 if (!size) {
25836 dev_kfree_skb_irq(skb);
25837 - if (vcc) atomic_inc(&vcc->stats->rx_err);
25838 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25839 continue;
25840 }
25841 if (!atm_charge(vcc,skb->truesize)) {
25842 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25843 skb->len = size;
25844 ATM_SKB(skb)->vcc = vcc;
25845 vcc->push(vcc,skb);
25846 - atomic_inc(&vcc->stats->rx);
25847 + atomic_inc_unchecked(&vcc->stats->rx);
25848 }
25849 zout(pos & 0xffff,MTA(mbx));
25850 #if 0 /* probably a stupid idea */
25851 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25852 skb_queue_head(&zatm_vcc->backlog,skb);
25853 break;
25854 }
25855 - atomic_inc(&vcc->stats->tx);
25856 + atomic_inc_unchecked(&vcc->stats->tx);
25857 wake_up(&zatm_vcc->tx_wait);
25858 }
25859
25860 diff -urNp linux-2.6.32.41/drivers/base/bus.c linux-2.6.32.41/drivers/base/bus.c
25861 --- linux-2.6.32.41/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
25862 +++ linux-2.6.32.41/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
25863 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
25864 return ret;
25865 }
25866
25867 -static struct sysfs_ops driver_sysfs_ops = {
25868 +static const struct sysfs_ops driver_sysfs_ops = {
25869 .show = drv_attr_show,
25870 .store = drv_attr_store,
25871 };
25872 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
25873 return ret;
25874 }
25875
25876 -static struct sysfs_ops bus_sysfs_ops = {
25877 +static const struct sysfs_ops bus_sysfs_ops = {
25878 .show = bus_attr_show,
25879 .store = bus_attr_store,
25880 };
25881 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
25882 return 0;
25883 }
25884
25885 -static struct kset_uevent_ops bus_uevent_ops = {
25886 +static const struct kset_uevent_ops bus_uevent_ops = {
25887 .filter = bus_uevent_filter,
25888 };
25889
25890 diff -urNp linux-2.6.32.41/drivers/base/class.c linux-2.6.32.41/drivers/base/class.c
25891 --- linux-2.6.32.41/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
25892 +++ linux-2.6.32.41/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
25893 @@ -63,7 +63,7 @@ static void class_release(struct kobject
25894 kfree(cp);
25895 }
25896
25897 -static struct sysfs_ops class_sysfs_ops = {
25898 +static const struct sysfs_ops class_sysfs_ops = {
25899 .show = class_attr_show,
25900 .store = class_attr_store,
25901 };
25902 diff -urNp linux-2.6.32.41/drivers/base/core.c linux-2.6.32.41/drivers/base/core.c
25903 --- linux-2.6.32.41/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
25904 +++ linux-2.6.32.41/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
25905 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
25906 return ret;
25907 }
25908
25909 -static struct sysfs_ops dev_sysfs_ops = {
25910 +static const struct sysfs_ops dev_sysfs_ops = {
25911 .show = dev_attr_show,
25912 .store = dev_attr_store,
25913 };
25914 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
25915 return retval;
25916 }
25917
25918 -static struct kset_uevent_ops device_uevent_ops = {
25919 +static const struct kset_uevent_ops device_uevent_ops = {
25920 .filter = dev_uevent_filter,
25921 .name = dev_uevent_name,
25922 .uevent = dev_uevent,
25923 diff -urNp linux-2.6.32.41/drivers/base/memory.c linux-2.6.32.41/drivers/base/memory.c
25924 --- linux-2.6.32.41/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
25925 +++ linux-2.6.32.41/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
25926 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
25927 return retval;
25928 }
25929
25930 -static struct kset_uevent_ops memory_uevent_ops = {
25931 +static const struct kset_uevent_ops memory_uevent_ops = {
25932 .name = memory_uevent_name,
25933 .uevent = memory_uevent,
25934 };
25935 diff -urNp linux-2.6.32.41/drivers/base/sys.c linux-2.6.32.41/drivers/base/sys.c
25936 --- linux-2.6.32.41/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
25937 +++ linux-2.6.32.41/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
25938 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
25939 return -EIO;
25940 }
25941
25942 -static struct sysfs_ops sysfs_ops = {
25943 +static const struct sysfs_ops sysfs_ops = {
25944 .show = sysdev_show,
25945 .store = sysdev_store,
25946 };
25947 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
25948 return -EIO;
25949 }
25950
25951 -static struct sysfs_ops sysfs_class_ops = {
25952 +static const struct sysfs_ops sysfs_class_ops = {
25953 .show = sysdev_class_show,
25954 .store = sysdev_class_store,
25955 };
25956 diff -urNp linux-2.6.32.41/drivers/block/cciss.c linux-2.6.32.41/drivers/block/cciss.c
25957 --- linux-2.6.32.41/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
25958 +++ linux-2.6.32.41/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
25959 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
25960 int err;
25961 u32 cp;
25962
25963 + memset(&arg64, 0, sizeof(arg64));
25964 +
25965 err = 0;
25966 err |=
25967 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
25968 diff -urNp linux-2.6.32.41/drivers/block/cpqarray.c linux-2.6.32.41/drivers/block/cpqarray.c
25969 --- linux-2.6.32.41/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
25970 +++ linux-2.6.32.41/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
25971 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
25972 struct scatterlist tmp_sg[SG_MAX];
25973 int i, dir, seg;
25974
25975 + pax_track_stack();
25976 +
25977 if (blk_queue_plugged(q))
25978 goto startio;
25979
25980 diff -urNp linux-2.6.32.41/drivers/block/DAC960.c linux-2.6.32.41/drivers/block/DAC960.c
25981 --- linux-2.6.32.41/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
25982 +++ linux-2.6.32.41/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
25983 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25984 unsigned long flags;
25985 int Channel, TargetID;
25986
25987 + pax_track_stack();
25988 +
25989 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25990 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25991 sizeof(DAC960_SCSI_Inquiry_T) +
25992 diff -urNp linux-2.6.32.41/drivers/block/nbd.c linux-2.6.32.41/drivers/block/nbd.c
25993 --- linux-2.6.32.41/drivers/block/nbd.c 2011-03-27 14:31:47.000000000 -0400
25994 +++ linux-2.6.32.41/drivers/block/nbd.c 2011-05-16 21:46:57.000000000 -0400
25995 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
25996 struct kvec iov;
25997 sigset_t blocked, oldset;
25998
25999 + pax_track_stack();
26000 +
26001 if (unlikely(!sock)) {
26002 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26003 lo->disk->disk_name, (send ? "send" : "recv"));
26004 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26005 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26006 unsigned int cmd, unsigned long arg)
26007 {
26008 + pax_track_stack();
26009 +
26010 switch (cmd) {
26011 case NBD_DISCONNECT: {
26012 struct request sreq;
26013 diff -urNp linux-2.6.32.41/drivers/block/pktcdvd.c linux-2.6.32.41/drivers/block/pktcdvd.c
26014 --- linux-2.6.32.41/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26015 +++ linux-2.6.32.41/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26016 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26017 return len;
26018 }
26019
26020 -static struct sysfs_ops kobj_pkt_ops = {
26021 +static const struct sysfs_ops kobj_pkt_ops = {
26022 .show = kobj_pkt_show,
26023 .store = kobj_pkt_store
26024 };
26025 diff -urNp linux-2.6.32.41/drivers/char/agp/frontend.c linux-2.6.32.41/drivers/char/agp/frontend.c
26026 --- linux-2.6.32.41/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26027 +++ linux-2.6.32.41/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26028 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26029 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26030 return -EFAULT;
26031
26032 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26033 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26034 return -EFAULT;
26035
26036 client = agp_find_client_by_pid(reserve.pid);
26037 diff -urNp linux-2.6.32.41/drivers/char/briq_panel.c linux-2.6.32.41/drivers/char/briq_panel.c
26038 --- linux-2.6.32.41/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26039 +++ linux-2.6.32.41/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26040 @@ -10,6 +10,7 @@
26041 #include <linux/types.h>
26042 #include <linux/errno.h>
26043 #include <linux/tty.h>
26044 +#include <linux/mutex.h>
26045 #include <linux/timer.h>
26046 #include <linux/kernel.h>
26047 #include <linux/wait.h>
26048 @@ -36,6 +37,7 @@ static int vfd_is_open;
26049 static unsigned char vfd[40];
26050 static int vfd_cursor;
26051 static unsigned char ledpb, led;
26052 +static DEFINE_MUTEX(vfd_mutex);
26053
26054 static void update_vfd(void)
26055 {
26056 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26057 if (!vfd_is_open)
26058 return -EBUSY;
26059
26060 + mutex_lock(&vfd_mutex);
26061 for (;;) {
26062 char c;
26063 if (!indx)
26064 break;
26065 - if (get_user(c, buf))
26066 + if (get_user(c, buf)) {
26067 + mutex_unlock(&vfd_mutex);
26068 return -EFAULT;
26069 + }
26070 if (esc) {
26071 set_led(c);
26072 esc = 0;
26073 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26074 buf++;
26075 }
26076 update_vfd();
26077 + mutex_unlock(&vfd_mutex);
26078
26079 return len;
26080 }
26081 diff -urNp linux-2.6.32.41/drivers/char/genrtc.c linux-2.6.32.41/drivers/char/genrtc.c
26082 --- linux-2.6.32.41/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26083 +++ linux-2.6.32.41/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26084 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26085 switch (cmd) {
26086
26087 case RTC_PLL_GET:
26088 + memset(&pll, 0, sizeof(pll));
26089 if (get_rtc_pll(&pll))
26090 return -EINVAL;
26091 else
26092 diff -urNp linux-2.6.32.41/drivers/char/hpet.c linux-2.6.32.41/drivers/char/hpet.c
26093 --- linux-2.6.32.41/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26094 +++ linux-2.6.32.41/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26095 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26096 return 0;
26097 }
26098
26099 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26100 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26101
26102 static int
26103 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26104 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26105 }
26106
26107 static int
26108 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26109 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26110 {
26111 struct hpet_timer __iomem *timer;
26112 struct hpet __iomem *hpet;
26113 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26114 {
26115 struct hpet_info info;
26116
26117 + memset(&info, 0, sizeof(info));
26118 +
26119 if (devp->hd_ireqfreq)
26120 info.hi_ireqfreq =
26121 hpet_time_div(hpetp, devp->hd_ireqfreq);
26122 - else
26123 - info.hi_ireqfreq = 0;
26124 info.hi_flags =
26125 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26126 info.hi_hpet = hpetp->hp_which;
26127 diff -urNp linux-2.6.32.41/drivers/char/hvc_beat.c linux-2.6.32.41/drivers/char/hvc_beat.c
26128 --- linux-2.6.32.41/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26129 +++ linux-2.6.32.41/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26130 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26131 return cnt;
26132 }
26133
26134 -static struct hv_ops hvc_beat_get_put_ops = {
26135 +static const struct hv_ops hvc_beat_get_put_ops = {
26136 .get_chars = hvc_beat_get_chars,
26137 .put_chars = hvc_beat_put_chars,
26138 };
26139 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.c linux-2.6.32.41/drivers/char/hvc_console.c
26140 --- linux-2.6.32.41/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26141 +++ linux-2.6.32.41/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26142 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26143 * console interfaces but can still be used as a tty device. This has to be
26144 * static because kmalloc will not work during early console init.
26145 */
26146 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26147 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26148 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26149 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26150
26151 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26152 * vty adapters do NOT get an hvc_instantiate() callback since they
26153 * appear after early console init.
26154 */
26155 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26156 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26157 {
26158 struct hvc_struct *hp;
26159
26160 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26161 };
26162
26163 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26164 - struct hv_ops *ops, int outbuf_size)
26165 + const struct hv_ops *ops, int outbuf_size)
26166 {
26167 struct hvc_struct *hp;
26168 int i;
26169 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.h linux-2.6.32.41/drivers/char/hvc_console.h
26170 --- linux-2.6.32.41/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26171 +++ linux-2.6.32.41/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26172 @@ -55,7 +55,7 @@ struct hvc_struct {
26173 int outbuf_size;
26174 int n_outbuf;
26175 uint32_t vtermno;
26176 - struct hv_ops *ops;
26177 + const struct hv_ops *ops;
26178 int irq_requested;
26179 int data;
26180 struct winsize ws;
26181 @@ -76,11 +76,11 @@ struct hv_ops {
26182 };
26183
26184 /* Register a vterm and a slot index for use as a console (console_init) */
26185 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26186 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26187
26188 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26189 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26190 - struct hv_ops *ops, int outbuf_size);
26191 + const struct hv_ops *ops, int outbuf_size);
26192 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26193 extern int hvc_remove(struct hvc_struct *hp);
26194
26195 diff -urNp linux-2.6.32.41/drivers/char/hvc_iseries.c linux-2.6.32.41/drivers/char/hvc_iseries.c
26196 --- linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26197 +++ linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26198 @@ -197,7 +197,7 @@ done:
26199 return sent;
26200 }
26201
26202 -static struct hv_ops hvc_get_put_ops = {
26203 +static const struct hv_ops hvc_get_put_ops = {
26204 .get_chars = get_chars,
26205 .put_chars = put_chars,
26206 .notifier_add = notifier_add_irq,
26207 diff -urNp linux-2.6.32.41/drivers/char/hvc_iucv.c linux-2.6.32.41/drivers/char/hvc_iucv.c
26208 --- linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26209 +++ linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26210 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26211
26212
26213 /* HVC operations */
26214 -static struct hv_ops hvc_iucv_ops = {
26215 +static const struct hv_ops hvc_iucv_ops = {
26216 .get_chars = hvc_iucv_get_chars,
26217 .put_chars = hvc_iucv_put_chars,
26218 .notifier_add = hvc_iucv_notifier_add,
26219 diff -urNp linux-2.6.32.41/drivers/char/hvc_rtas.c linux-2.6.32.41/drivers/char/hvc_rtas.c
26220 --- linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26221 +++ linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26222 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26223 return i;
26224 }
26225
26226 -static struct hv_ops hvc_rtas_get_put_ops = {
26227 +static const struct hv_ops hvc_rtas_get_put_ops = {
26228 .get_chars = hvc_rtas_read_console,
26229 .put_chars = hvc_rtas_write_console,
26230 };
26231 diff -urNp linux-2.6.32.41/drivers/char/hvcs.c linux-2.6.32.41/drivers/char/hvcs.c
26232 --- linux-2.6.32.41/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26233 +++ linux-2.6.32.41/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26234 @@ -82,6 +82,7 @@
26235 #include <asm/hvcserver.h>
26236 #include <asm/uaccess.h>
26237 #include <asm/vio.h>
26238 +#include <asm/local.h>
26239
26240 /*
26241 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26242 @@ -269,7 +270,7 @@ struct hvcs_struct {
26243 unsigned int index;
26244
26245 struct tty_struct *tty;
26246 - int open_count;
26247 + local_t open_count;
26248
26249 /*
26250 * Used to tell the driver kernel_thread what operations need to take
26251 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26252
26253 spin_lock_irqsave(&hvcsd->lock, flags);
26254
26255 - if (hvcsd->open_count > 0) {
26256 + if (local_read(&hvcsd->open_count) > 0) {
26257 spin_unlock_irqrestore(&hvcsd->lock, flags);
26258 printk(KERN_INFO "HVCS: vterm state unchanged. "
26259 "The hvcs device node is still in use.\n");
26260 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26261 if ((retval = hvcs_partner_connect(hvcsd)))
26262 goto error_release;
26263
26264 - hvcsd->open_count = 1;
26265 + local_set(&hvcsd->open_count, 1);
26266 hvcsd->tty = tty;
26267 tty->driver_data = hvcsd;
26268
26269 @@ -1169,7 +1170,7 @@ fast_open:
26270
26271 spin_lock_irqsave(&hvcsd->lock, flags);
26272 kref_get(&hvcsd->kref);
26273 - hvcsd->open_count++;
26274 + local_inc(&hvcsd->open_count);
26275 hvcsd->todo_mask |= HVCS_SCHED_READ;
26276 spin_unlock_irqrestore(&hvcsd->lock, flags);
26277
26278 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26279 hvcsd = tty->driver_data;
26280
26281 spin_lock_irqsave(&hvcsd->lock, flags);
26282 - if (--hvcsd->open_count == 0) {
26283 + if (local_dec_and_test(&hvcsd->open_count)) {
26284
26285 vio_disable_interrupts(hvcsd->vdev);
26286
26287 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26288 free_irq(irq, hvcsd);
26289 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26290 return;
26291 - } else if (hvcsd->open_count < 0) {
26292 + } else if (local_read(&hvcsd->open_count) < 0) {
26293 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26294 " is missmanaged.\n",
26295 - hvcsd->vdev->unit_address, hvcsd->open_count);
26296 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26297 }
26298
26299 spin_unlock_irqrestore(&hvcsd->lock, flags);
26300 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26301
26302 spin_lock_irqsave(&hvcsd->lock, flags);
26303 /* Preserve this so that we know how many kref refs to put */
26304 - temp_open_count = hvcsd->open_count;
26305 + temp_open_count = local_read(&hvcsd->open_count);
26306
26307 /*
26308 * Don't kref put inside the spinlock because the destruction
26309 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26310 hvcsd->tty->driver_data = NULL;
26311 hvcsd->tty = NULL;
26312
26313 - hvcsd->open_count = 0;
26314 + local_set(&hvcsd->open_count, 0);
26315
26316 /* This will drop any buffered data on the floor which is OK in a hangup
26317 * scenario. */
26318 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26319 * the middle of a write operation? This is a crummy place to do this
26320 * but we want to keep it all in the spinlock.
26321 */
26322 - if (hvcsd->open_count <= 0) {
26323 + if (local_read(&hvcsd->open_count) <= 0) {
26324 spin_unlock_irqrestore(&hvcsd->lock, flags);
26325 return -ENODEV;
26326 }
26327 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26328 {
26329 struct hvcs_struct *hvcsd = tty->driver_data;
26330
26331 - if (!hvcsd || hvcsd->open_count <= 0)
26332 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26333 return 0;
26334
26335 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
26336 diff -urNp linux-2.6.32.41/drivers/char/hvc_udbg.c linux-2.6.32.41/drivers/char/hvc_udbg.c
26337 --- linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26338 +++ linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26339 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26340 return i;
26341 }
26342
26343 -static struct hv_ops hvc_udbg_ops = {
26344 +static const struct hv_ops hvc_udbg_ops = {
26345 .get_chars = hvc_udbg_get,
26346 .put_chars = hvc_udbg_put,
26347 };
26348 diff -urNp linux-2.6.32.41/drivers/char/hvc_vio.c linux-2.6.32.41/drivers/char/hvc_vio.c
26349 --- linux-2.6.32.41/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26350 +++ linux-2.6.32.41/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26351 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26352 return got;
26353 }
26354
26355 -static struct hv_ops hvc_get_put_ops = {
26356 +static const struct hv_ops hvc_get_put_ops = {
26357 .get_chars = filtered_get_chars,
26358 .put_chars = hvc_put_chars,
26359 .notifier_add = notifier_add_irq,
26360 diff -urNp linux-2.6.32.41/drivers/char/hvc_xen.c linux-2.6.32.41/drivers/char/hvc_xen.c
26361 --- linux-2.6.32.41/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26362 +++ linux-2.6.32.41/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26363 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26364 return recv;
26365 }
26366
26367 -static struct hv_ops hvc_ops = {
26368 +static const struct hv_ops hvc_ops = {
26369 .get_chars = read_console,
26370 .put_chars = write_console,
26371 .notifier_add = notifier_add_irq,
26372 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c
26373 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26374 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26375 @@ -414,7 +414,7 @@ struct ipmi_smi {
26376 struct proc_dir_entry *proc_dir;
26377 char proc_dir_name[10];
26378
26379 - atomic_t stats[IPMI_NUM_STATS];
26380 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26381
26382 /*
26383 * run_to_completion duplicate of smb_info, smi_info
26384 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26385
26386
26387 #define ipmi_inc_stat(intf, stat) \
26388 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26389 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26390 #define ipmi_get_stat(intf, stat) \
26391 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26392 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26393
26394 static int is_lan_addr(struct ipmi_addr *addr)
26395 {
26396 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26397 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26398 init_waitqueue_head(&intf->waitq);
26399 for (i = 0; i < IPMI_NUM_STATS; i++)
26400 - atomic_set(&intf->stats[i], 0);
26401 + atomic_set_unchecked(&intf->stats[i], 0);
26402
26403 intf->proc_dir = NULL;
26404
26405 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26406 struct ipmi_smi_msg smi_msg;
26407 struct ipmi_recv_msg recv_msg;
26408
26409 + pax_track_stack();
26410 +
26411 si = (struct ipmi_system_interface_addr *) &addr;
26412 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26413 si->channel = IPMI_BMC_CHANNEL;
26414 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c
26415 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26416 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26417 @@ -277,7 +277,7 @@ struct smi_info {
26418 unsigned char slave_addr;
26419
26420 /* Counters and things for the proc filesystem. */
26421 - atomic_t stats[SI_NUM_STATS];
26422 + atomic_unchecked_t stats[SI_NUM_STATS];
26423
26424 struct task_struct *thread;
26425
26426 @@ -285,9 +285,9 @@ struct smi_info {
26427 };
26428
26429 #define smi_inc_stat(smi, stat) \
26430 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26431 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26432 #define smi_get_stat(smi, stat) \
26433 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26434 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26435
26436 #define SI_MAX_PARMS 4
26437
26438 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26439 atomic_set(&new_smi->req_events, 0);
26440 new_smi->run_to_completion = 0;
26441 for (i = 0; i < SI_NUM_STATS; i++)
26442 - atomic_set(&new_smi->stats[i], 0);
26443 + atomic_set_unchecked(&new_smi->stats[i], 0);
26444
26445 new_smi->interrupt_disabled = 0;
26446 atomic_set(&new_smi->stop_operation, 0);
26447 diff -urNp linux-2.6.32.41/drivers/char/istallion.c linux-2.6.32.41/drivers/char/istallion.c
26448 --- linux-2.6.32.41/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26449 +++ linux-2.6.32.41/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26450 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26451 * re-used for each stats call.
26452 */
26453 static comstats_t stli_comstats;
26454 -static combrd_t stli_brdstats;
26455 static struct asystats stli_cdkstats;
26456
26457 /*****************************************************************************/
26458 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26459 {
26460 struct stlibrd *brdp;
26461 unsigned int i;
26462 + combrd_t stli_brdstats;
26463
26464 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26465 return -EFAULT;
26466 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26467 struct stliport stli_dummyport;
26468 struct stliport *portp;
26469
26470 + pax_track_stack();
26471 +
26472 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26473 return -EFAULT;
26474 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26475 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26476 struct stlibrd stli_dummybrd;
26477 struct stlibrd *brdp;
26478
26479 + pax_track_stack();
26480 +
26481 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26482 return -EFAULT;
26483 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
26484 diff -urNp linux-2.6.32.41/drivers/char/Kconfig linux-2.6.32.41/drivers/char/Kconfig
26485 --- linux-2.6.32.41/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26486 +++ linux-2.6.32.41/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26487 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26488
26489 config DEVKMEM
26490 bool "/dev/kmem virtual device support"
26491 - default y
26492 + default n
26493 + depends on !GRKERNSEC_KMEM
26494 help
26495 Say Y here if you want to support the /dev/kmem device. The
26496 /dev/kmem device is rarely used, but can be used for certain
26497 @@ -1114,6 +1115,7 @@ config DEVPORT
26498 bool
26499 depends on !M68K
26500 depends on ISA || PCI
26501 + depends on !GRKERNSEC_KMEM
26502 default y
26503
26504 source "drivers/s390/char/Kconfig"
26505 diff -urNp linux-2.6.32.41/drivers/char/keyboard.c linux-2.6.32.41/drivers/char/keyboard.c
26506 --- linux-2.6.32.41/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26507 +++ linux-2.6.32.41/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26508 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26509 kbd->kbdmode == VC_MEDIUMRAW) &&
26510 value != KVAL(K_SAK))
26511 return; /* SAK is allowed even in raw mode */
26512 +
26513 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26514 + {
26515 + void *func = fn_handler[value];
26516 + if (func == fn_show_state || func == fn_show_ptregs ||
26517 + func == fn_show_mem)
26518 + return;
26519 + }
26520 +#endif
26521 +
26522 fn_handler[value](vc);
26523 }
26524
26525 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26526 .evbit = { BIT_MASK(EV_SND) },
26527 },
26528
26529 - { }, /* Terminating entry */
26530 + { 0 }, /* Terminating entry */
26531 };
26532
26533 MODULE_DEVICE_TABLE(input, kbd_ids);
26534 diff -urNp linux-2.6.32.41/drivers/char/mem.c linux-2.6.32.41/drivers/char/mem.c
26535 --- linux-2.6.32.41/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26536 +++ linux-2.6.32.41/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26537 @@ -18,6 +18,7 @@
26538 #include <linux/raw.h>
26539 #include <linux/tty.h>
26540 #include <linux/capability.h>
26541 +#include <linux/security.h>
26542 #include <linux/ptrace.h>
26543 #include <linux/device.h>
26544 #include <linux/highmem.h>
26545 @@ -35,6 +36,10 @@
26546 # include <linux/efi.h>
26547 #endif
26548
26549 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26550 +extern struct file_operations grsec_fops;
26551 +#endif
26552 +
26553 static inline unsigned long size_inside_page(unsigned long start,
26554 unsigned long size)
26555 {
26556 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26557
26558 while (cursor < to) {
26559 if (!devmem_is_allowed(pfn)) {
26560 +#ifdef CONFIG_GRKERNSEC_KMEM
26561 + gr_handle_mem_readwrite(from, to);
26562 +#else
26563 printk(KERN_INFO
26564 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26565 current->comm, from, to);
26566 +#endif
26567 return 0;
26568 }
26569 cursor += PAGE_SIZE;
26570 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26571 }
26572 return 1;
26573 }
26574 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26575 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26576 +{
26577 + return 0;
26578 +}
26579 #else
26580 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26581 {
26582 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26583 #endif
26584
26585 while (count > 0) {
26586 + char *temp;
26587 +
26588 /*
26589 * Handle first page in case it's not aligned
26590 */
26591 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26592 if (!ptr)
26593 return -EFAULT;
26594
26595 - if (copy_to_user(buf, ptr, sz)) {
26596 +#ifdef CONFIG_PAX_USERCOPY
26597 + temp = kmalloc(sz, GFP_KERNEL);
26598 + if (!temp) {
26599 + unxlate_dev_mem_ptr(p, ptr);
26600 + return -ENOMEM;
26601 + }
26602 + memcpy(temp, ptr, sz);
26603 +#else
26604 + temp = ptr;
26605 +#endif
26606 +
26607 + if (copy_to_user(buf, temp, sz)) {
26608 +
26609 +#ifdef CONFIG_PAX_USERCOPY
26610 + kfree(temp);
26611 +#endif
26612 +
26613 unxlate_dev_mem_ptr(p, ptr);
26614 return -EFAULT;
26615 }
26616
26617 +#ifdef CONFIG_PAX_USERCOPY
26618 + kfree(temp);
26619 +#endif
26620 +
26621 unxlate_dev_mem_ptr(p, ptr);
26622
26623 buf += sz;
26624 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26625 size_t count, loff_t *ppos)
26626 {
26627 unsigned long p = *ppos;
26628 - ssize_t low_count, read, sz;
26629 + ssize_t low_count, read, sz, err = 0;
26630 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26631 - int err = 0;
26632
26633 read = 0;
26634 if (p < (unsigned long) high_memory) {
26635 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26636 }
26637 #endif
26638 while (low_count > 0) {
26639 + char *temp;
26640 +
26641 sz = size_inside_page(p, low_count);
26642
26643 /*
26644 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26645 */
26646 kbuf = xlate_dev_kmem_ptr((char *)p);
26647
26648 - if (copy_to_user(buf, kbuf, sz))
26649 +#ifdef CONFIG_PAX_USERCOPY
26650 + temp = kmalloc(sz, GFP_KERNEL);
26651 + if (!temp)
26652 + return -ENOMEM;
26653 + memcpy(temp, kbuf, sz);
26654 +#else
26655 + temp = kbuf;
26656 +#endif
26657 +
26658 + err = copy_to_user(buf, temp, sz);
26659 +
26660 +#ifdef CONFIG_PAX_USERCOPY
26661 + kfree(temp);
26662 +#endif
26663 +
26664 + if (err)
26665 return -EFAULT;
26666 buf += sz;
26667 p += sz;
26668 @@ -889,6 +941,9 @@ static const struct memdev {
26669 #ifdef CONFIG_CRASH_DUMP
26670 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26671 #endif
26672 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26673 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26674 +#endif
26675 };
26676
26677 static int memory_open(struct inode *inode, struct file *filp)
26678 diff -urNp linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c
26679 --- linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26680 +++ linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26681 @@ -29,6 +29,7 @@
26682 #include <linux/tty_driver.h>
26683 #include <linux/tty_flip.h>
26684 #include <linux/uaccess.h>
26685 +#include <asm/local.h>
26686
26687 #include "tty.h"
26688 #include "network.h"
26689 @@ -51,7 +52,7 @@ struct ipw_tty {
26690 int tty_type;
26691 struct ipw_network *network;
26692 struct tty_struct *linux_tty;
26693 - int open_count;
26694 + local_t open_count;
26695 unsigned int control_lines;
26696 struct mutex ipw_tty_mutex;
26697 int tx_bytes_queued;
26698 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26699 mutex_unlock(&tty->ipw_tty_mutex);
26700 return -ENODEV;
26701 }
26702 - if (tty->open_count == 0)
26703 + if (local_read(&tty->open_count) == 0)
26704 tty->tx_bytes_queued = 0;
26705
26706 - tty->open_count++;
26707 + local_inc(&tty->open_count);
26708
26709 tty->linux_tty = linux_tty;
26710 linux_tty->driver_data = tty;
26711 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26712
26713 static void do_ipw_close(struct ipw_tty *tty)
26714 {
26715 - tty->open_count--;
26716 -
26717 - if (tty->open_count == 0) {
26718 + if (local_dec_return(&tty->open_count) == 0) {
26719 struct tty_struct *linux_tty = tty->linux_tty;
26720
26721 if (linux_tty != NULL) {
26722 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26723 return;
26724
26725 mutex_lock(&tty->ipw_tty_mutex);
26726 - if (tty->open_count == 0) {
26727 + if (local_read(&tty->open_count) == 0) {
26728 mutex_unlock(&tty->ipw_tty_mutex);
26729 return;
26730 }
26731 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26732 return;
26733 }
26734
26735 - if (!tty->open_count) {
26736 + if (!local_read(&tty->open_count)) {
26737 mutex_unlock(&tty->ipw_tty_mutex);
26738 return;
26739 }
26740 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26741 return -ENODEV;
26742
26743 mutex_lock(&tty->ipw_tty_mutex);
26744 - if (!tty->open_count) {
26745 + if (!local_read(&tty->open_count)) {
26746 mutex_unlock(&tty->ipw_tty_mutex);
26747 return -EINVAL;
26748 }
26749 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26750 if (!tty)
26751 return -ENODEV;
26752
26753 - if (!tty->open_count)
26754 + if (!local_read(&tty->open_count))
26755 return -EINVAL;
26756
26757 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26758 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26759 if (!tty)
26760 return 0;
26761
26762 - if (!tty->open_count)
26763 + if (!local_read(&tty->open_count))
26764 return 0;
26765
26766 return tty->tx_bytes_queued;
26767 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26768 if (!tty)
26769 return -ENODEV;
26770
26771 - if (!tty->open_count)
26772 + if (!local_read(&tty->open_count))
26773 return -EINVAL;
26774
26775 return get_control_lines(tty);
26776 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26777 if (!tty)
26778 return -ENODEV;
26779
26780 - if (!tty->open_count)
26781 + if (!local_read(&tty->open_count))
26782 return -EINVAL;
26783
26784 return set_control_lines(tty, set, clear);
26785 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26786 if (!tty)
26787 return -ENODEV;
26788
26789 - if (!tty->open_count)
26790 + if (!local_read(&tty->open_count))
26791 return -EINVAL;
26792
26793 /* FIXME: Exactly how is the tty object locked here .. */
26794 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26795 against a parallel ioctl etc */
26796 mutex_lock(&ttyj->ipw_tty_mutex);
26797 }
26798 - while (ttyj->open_count)
26799 + while (local_read(&ttyj->open_count))
26800 do_ipw_close(ttyj);
26801 ipwireless_disassociate_network_ttys(network,
26802 ttyj->channel_idx);
26803 diff -urNp linux-2.6.32.41/drivers/char/pty.c linux-2.6.32.41/drivers/char/pty.c
26804 --- linux-2.6.32.41/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26805 +++ linux-2.6.32.41/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26806 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26807 return ret;
26808 }
26809
26810 -static struct file_operations ptmx_fops;
26811 +static const struct file_operations ptmx_fops = {
26812 + .llseek = no_llseek,
26813 + .read = tty_read,
26814 + .write = tty_write,
26815 + .poll = tty_poll,
26816 + .unlocked_ioctl = tty_ioctl,
26817 + .compat_ioctl = tty_compat_ioctl,
26818 + .open = ptmx_open,
26819 + .release = tty_release,
26820 + .fasync = tty_fasync,
26821 +};
26822 +
26823
26824 static void __init unix98_pty_init(void)
26825 {
26826 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26827 register_sysctl_table(pty_root_table);
26828
26829 /* Now create the /dev/ptmx special device */
26830 - tty_default_fops(&ptmx_fops);
26831 - ptmx_fops.open = ptmx_open;
26832 -
26833 cdev_init(&ptmx_cdev, &ptmx_fops);
26834 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26835 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
26836 diff -urNp linux-2.6.32.41/drivers/char/random.c linux-2.6.32.41/drivers/char/random.c
26837 --- linux-2.6.32.41/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26838 +++ linux-2.6.32.41/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26839 @@ -254,8 +254,13 @@
26840 /*
26841 * Configuration information
26842 */
26843 +#ifdef CONFIG_GRKERNSEC_RANDNET
26844 +#define INPUT_POOL_WORDS 512
26845 +#define OUTPUT_POOL_WORDS 128
26846 +#else
26847 #define INPUT_POOL_WORDS 128
26848 #define OUTPUT_POOL_WORDS 32
26849 +#endif
26850 #define SEC_XFER_SIZE 512
26851
26852 /*
26853 @@ -292,10 +297,17 @@ static struct poolinfo {
26854 int poolwords;
26855 int tap1, tap2, tap3, tap4, tap5;
26856 } poolinfo_table[] = {
26857 +#ifdef CONFIG_GRKERNSEC_RANDNET
26858 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
26859 + { 512, 411, 308, 208, 104, 1 },
26860 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
26861 + { 128, 103, 76, 51, 25, 1 },
26862 +#else
26863 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
26864 { 128, 103, 76, 51, 25, 1 },
26865 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
26866 { 32, 26, 20, 14, 7, 1 },
26867 +#endif
26868 #if 0
26869 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
26870 { 2048, 1638, 1231, 819, 411, 1 },
26871 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
26872 #include <linux/sysctl.h>
26873
26874 static int min_read_thresh = 8, min_write_thresh;
26875 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
26876 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
26877 static int max_write_thresh = INPUT_POOL_WORDS * 32;
26878 static char sysctl_bootid[16];
26879
26880 diff -urNp linux-2.6.32.41/drivers/char/rocket.c linux-2.6.32.41/drivers/char/rocket.c
26881 --- linux-2.6.32.41/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
26882 +++ linux-2.6.32.41/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
26883 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
26884 struct rocket_ports tmp;
26885 int board;
26886
26887 + pax_track_stack();
26888 +
26889 if (!retports)
26890 return -EFAULT;
26891 memset(&tmp, 0, sizeof (tmp));
26892 diff -urNp linux-2.6.32.41/drivers/char/sonypi.c linux-2.6.32.41/drivers/char/sonypi.c
26893 --- linux-2.6.32.41/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
26894 +++ linux-2.6.32.41/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
26895 @@ -55,6 +55,7 @@
26896 #include <asm/uaccess.h>
26897 #include <asm/io.h>
26898 #include <asm/system.h>
26899 +#include <asm/local.h>
26900
26901 #include <linux/sonypi.h>
26902
26903 @@ -491,7 +492,7 @@ static struct sonypi_device {
26904 spinlock_t fifo_lock;
26905 wait_queue_head_t fifo_proc_list;
26906 struct fasync_struct *fifo_async;
26907 - int open_count;
26908 + local_t open_count;
26909 int model;
26910 struct input_dev *input_jog_dev;
26911 struct input_dev *input_key_dev;
26912 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
26913 static int sonypi_misc_release(struct inode *inode, struct file *file)
26914 {
26915 mutex_lock(&sonypi_device.lock);
26916 - sonypi_device.open_count--;
26917 + local_dec(&sonypi_device.open_count);
26918 mutex_unlock(&sonypi_device.lock);
26919 return 0;
26920 }
26921 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
26922 lock_kernel();
26923 mutex_lock(&sonypi_device.lock);
26924 /* Flush input queue on first open */
26925 - if (!sonypi_device.open_count)
26926 + if (!local_read(&sonypi_device.open_count))
26927 kfifo_reset(sonypi_device.fifo);
26928 - sonypi_device.open_count++;
26929 + local_inc(&sonypi_device.open_count);
26930 mutex_unlock(&sonypi_device.lock);
26931 unlock_kernel();
26932 return 0;
26933 diff -urNp linux-2.6.32.41/drivers/char/stallion.c linux-2.6.32.41/drivers/char/stallion.c
26934 --- linux-2.6.32.41/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
26935 +++ linux-2.6.32.41/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
26936 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
26937 struct stlport stl_dummyport;
26938 struct stlport *portp;
26939
26940 + pax_track_stack();
26941 +
26942 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
26943 return -EFAULT;
26944 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
26945 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm_bios.c linux-2.6.32.41/drivers/char/tpm/tpm_bios.c
26946 --- linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
26947 +++ linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
26948 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
26949 event = addr;
26950
26951 if ((event->event_type == 0 && event->event_size == 0) ||
26952 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
26953 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
26954 return NULL;
26955
26956 return addr;
26957 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
26958 return NULL;
26959
26960 if ((event->event_type == 0 && event->event_size == 0) ||
26961 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
26962 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
26963 return NULL;
26964
26965 (*pos)++;
26966 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
26967 int i;
26968
26969 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
26970 - seq_putc(m, data[i]);
26971 + if (!seq_putc(m, data[i]))
26972 + return -EFAULT;
26973
26974 return 0;
26975 }
26976 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
26977 log->bios_event_log_end = log->bios_event_log + len;
26978
26979 virt = acpi_os_map_memory(start, len);
26980 + if (!virt) {
26981 + kfree(log->bios_event_log);
26982 + log->bios_event_log = NULL;
26983 + return -EFAULT;
26984 + }
26985
26986 memcpy(log->bios_event_log, virt, len);
26987
26988 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm.c linux-2.6.32.41/drivers/char/tpm/tpm.c
26989 --- linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
26990 +++ linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
26991 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
26992 chip->vendor.req_complete_val)
26993 goto out_recv;
26994
26995 - if ((status == chip->vendor.req_canceled)) {
26996 + if (status == chip->vendor.req_canceled) {
26997 dev_err(chip->dev, "Operation Canceled\n");
26998 rc = -ECANCELED;
26999 goto out;
27000 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27001
27002 struct tpm_chip *chip = dev_get_drvdata(dev);
27003
27004 + pax_track_stack();
27005 +
27006 tpm_cmd.header.in = tpm_readpubek_header;
27007 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27008 "attempting to read the PUBEK");
27009 diff -urNp linux-2.6.32.41/drivers/char/tty_io.c linux-2.6.32.41/drivers/char/tty_io.c
27010 --- linux-2.6.32.41/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27011 +++ linux-2.6.32.41/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27012 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27013 DEFINE_MUTEX(tty_mutex);
27014 EXPORT_SYMBOL(tty_mutex);
27015
27016 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27017 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27018 ssize_t redirected_tty_write(struct file *, const char __user *,
27019 size_t, loff_t *);
27020 -static unsigned int tty_poll(struct file *, poll_table *);
27021 static int tty_open(struct inode *, struct file *);
27022 -static int tty_release(struct inode *, struct file *);
27023 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27024 -#ifdef CONFIG_COMPAT
27025 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27026 - unsigned long arg);
27027 -#else
27028 -#define tty_compat_ioctl NULL
27029 -#endif
27030 -static int tty_fasync(int fd, struct file *filp, int on);
27031 static void release_tty(struct tty_struct *tty, int idx);
27032 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27033 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27034 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27035 * read calls may be outstanding in parallel.
27036 */
27037
27038 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27039 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27040 loff_t *ppos)
27041 {
27042 int i;
27043 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27044 return i;
27045 }
27046
27047 +EXPORT_SYMBOL(tty_read);
27048 +
27049 void tty_write_unlock(struct tty_struct *tty)
27050 {
27051 mutex_unlock(&tty->atomic_write_lock);
27052 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27053 * write method will not be invoked in parallel for each device.
27054 */
27055
27056 -static ssize_t tty_write(struct file *file, const char __user *buf,
27057 +ssize_t tty_write(struct file *file, const char __user *buf,
27058 size_t count, loff_t *ppos)
27059 {
27060 struct tty_struct *tty;
27061 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27062 return ret;
27063 }
27064
27065 +EXPORT_SYMBOL(tty_write);
27066 +
27067 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27068 size_t count, loff_t *ppos)
27069 {
27070 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27071 * Takes bkl. See tty_release_dev
27072 */
27073
27074 -static int tty_release(struct inode *inode, struct file *filp)
27075 +int tty_release(struct inode *inode, struct file *filp)
27076 {
27077 lock_kernel();
27078 tty_release_dev(filp);
27079 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27080 return 0;
27081 }
27082
27083 +EXPORT_SYMBOL(tty_release);
27084 +
27085 /**
27086 * tty_poll - check tty status
27087 * @filp: file being polled
27088 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27089 * may be re-entered freely by other callers.
27090 */
27091
27092 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27093 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27094 {
27095 struct tty_struct *tty;
27096 struct tty_ldisc *ld;
27097 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27098 return ret;
27099 }
27100
27101 -static int tty_fasync(int fd, struct file *filp, int on)
27102 +EXPORT_SYMBOL(tty_poll);
27103 +
27104 +int tty_fasync(int fd, struct file *filp, int on)
27105 {
27106 struct tty_struct *tty;
27107 unsigned long flags;
27108 @@ -1948,6 +1945,8 @@ out:
27109 return retval;
27110 }
27111
27112 +EXPORT_SYMBOL(tty_fasync);
27113 +
27114 /**
27115 * tiocsti - fake input character
27116 * @tty: tty to fake input into
27117 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27118 return retval;
27119 }
27120
27121 +EXPORT_SYMBOL(tty_ioctl);
27122 +
27123 #ifdef CONFIG_COMPAT
27124 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27125 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27126 unsigned long arg)
27127 {
27128 struct inode *inode = file->f_dentry->d_inode;
27129 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27130
27131 return retval;
27132 }
27133 +
27134 +EXPORT_SYMBOL(tty_compat_ioctl);
27135 #endif
27136
27137 /*
27138 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27139 }
27140 EXPORT_SYMBOL_GPL(get_current_tty);
27141
27142 -void tty_default_fops(struct file_operations *fops)
27143 -{
27144 - *fops = tty_fops;
27145 -}
27146 -
27147 /*
27148 * Initialize the console device. This is called *early*, so
27149 * we can't necessarily depend on lots of kernel help here.
27150 diff -urNp linux-2.6.32.41/drivers/char/tty_ldisc.c linux-2.6.32.41/drivers/char/tty_ldisc.c
27151 --- linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27152 +++ linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27153 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27154 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27155 struct tty_ldisc_ops *ldo = ld->ops;
27156
27157 - ldo->refcount--;
27158 + atomic_dec(&ldo->refcount);
27159 module_put(ldo->owner);
27160 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27161
27162 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27163 spin_lock_irqsave(&tty_ldisc_lock, flags);
27164 tty_ldiscs[disc] = new_ldisc;
27165 new_ldisc->num = disc;
27166 - new_ldisc->refcount = 0;
27167 + atomic_set(&new_ldisc->refcount, 0);
27168 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27169
27170 return ret;
27171 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27172 return -EINVAL;
27173
27174 spin_lock_irqsave(&tty_ldisc_lock, flags);
27175 - if (tty_ldiscs[disc]->refcount)
27176 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27177 ret = -EBUSY;
27178 else
27179 tty_ldiscs[disc] = NULL;
27180 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27181 if (ldops) {
27182 ret = ERR_PTR(-EAGAIN);
27183 if (try_module_get(ldops->owner)) {
27184 - ldops->refcount++;
27185 + atomic_inc(&ldops->refcount);
27186 ret = ldops;
27187 }
27188 }
27189 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27190 unsigned long flags;
27191
27192 spin_lock_irqsave(&tty_ldisc_lock, flags);
27193 - ldops->refcount--;
27194 + atomic_dec(&ldops->refcount);
27195 module_put(ldops->owner);
27196 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27197 }
27198 diff -urNp linux-2.6.32.41/drivers/char/virtio_console.c linux-2.6.32.41/drivers/char/virtio_console.c
27199 --- linux-2.6.32.41/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27200 +++ linux-2.6.32.41/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27201 @@ -44,6 +44,7 @@ static unsigned int in_len;
27202 static char *in, *inbuf;
27203
27204 /* The operations for our console. */
27205 +/* cannot be const */
27206 static struct hv_ops virtio_cons;
27207
27208 /* The hvc device */
27209 diff -urNp linux-2.6.32.41/drivers/char/vt.c linux-2.6.32.41/drivers/char/vt.c
27210 --- linux-2.6.32.41/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27211 +++ linux-2.6.32.41/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27212 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27213
27214 static void notify_write(struct vc_data *vc, unsigned int unicode)
27215 {
27216 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27217 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27218 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27219 }
27220
27221 diff -urNp linux-2.6.32.41/drivers/char/vt_ioctl.c linux-2.6.32.41/drivers/char/vt_ioctl.c
27222 --- linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27223 +++ linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27224 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27225 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27226 return -EFAULT;
27227
27228 - if (!capable(CAP_SYS_TTY_CONFIG))
27229 - perm = 0;
27230 -
27231 switch (cmd) {
27232 case KDGKBENT:
27233 key_map = key_maps[s];
27234 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27235 val = (i ? K_HOLE : K_NOSUCHMAP);
27236 return put_user(val, &user_kbe->kb_value);
27237 case KDSKBENT:
27238 + if (!capable(CAP_SYS_TTY_CONFIG))
27239 + perm = 0;
27240 +
27241 if (!perm)
27242 return -EPERM;
27243 +
27244 if (!i && v == K_NOSUCHMAP) {
27245 /* deallocate map */
27246 key_map = key_maps[s];
27247 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27248 int i, j, k;
27249 int ret;
27250
27251 - if (!capable(CAP_SYS_TTY_CONFIG))
27252 - perm = 0;
27253 -
27254 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27255 if (!kbs) {
27256 ret = -ENOMEM;
27257 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27258 kfree(kbs);
27259 return ((p && *p) ? -EOVERFLOW : 0);
27260 case KDSKBSENT:
27261 + if (!capable(CAP_SYS_TTY_CONFIG))
27262 + perm = 0;
27263 +
27264 if (!perm) {
27265 ret = -EPERM;
27266 goto reterr;
27267 diff -urNp linux-2.6.32.41/drivers/cpufreq/cpufreq.c linux-2.6.32.41/drivers/cpufreq/cpufreq.c
27268 --- linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-03-27 14:31:47.000000000 -0400
27269 +++ linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-04-17 15:56:46.000000000 -0400
27270 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27271 complete(&policy->kobj_unregister);
27272 }
27273
27274 -static struct sysfs_ops sysfs_ops = {
27275 +static const struct sysfs_ops sysfs_ops = {
27276 .show = show,
27277 .store = store,
27278 };
27279 diff -urNp linux-2.6.32.41/drivers/cpuidle/sysfs.c linux-2.6.32.41/drivers/cpuidle/sysfs.c
27280 --- linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27281 +++ linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27282 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27283 return ret;
27284 }
27285
27286 -static struct sysfs_ops cpuidle_sysfs_ops = {
27287 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27288 .show = cpuidle_show,
27289 .store = cpuidle_store,
27290 };
27291 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27292 return ret;
27293 }
27294
27295 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27296 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27297 .show = cpuidle_state_show,
27298 };
27299
27300 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27301 .release = cpuidle_state_sysfs_release,
27302 };
27303
27304 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27305 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27306 {
27307 kobject_put(&device->kobjs[i]->kobj);
27308 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27309 diff -urNp linux-2.6.32.41/drivers/crypto/hifn_795x.c linux-2.6.32.41/drivers/crypto/hifn_795x.c
27310 --- linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27311 +++ linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27312 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27313 0xCA, 0x34, 0x2B, 0x2E};
27314 struct scatterlist sg;
27315
27316 + pax_track_stack();
27317 +
27318 memset(src, 0, sizeof(src));
27319 memset(ctx.key, 0, sizeof(ctx.key));
27320
27321 diff -urNp linux-2.6.32.41/drivers/crypto/padlock-aes.c linux-2.6.32.41/drivers/crypto/padlock-aes.c
27322 --- linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27323 +++ linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27324 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27325 struct crypto_aes_ctx gen_aes;
27326 int cpu;
27327
27328 + pax_track_stack();
27329 +
27330 if (key_len % 8) {
27331 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27332 return -EINVAL;
27333 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.c linux-2.6.32.41/drivers/dma/ioat/dma.c
27334 --- linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27335 +++ linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27336 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27337 return entry->show(&chan->common, page);
27338 }
27339
27340 -struct sysfs_ops ioat_sysfs_ops = {
27341 +const struct sysfs_ops ioat_sysfs_ops = {
27342 .show = ioat_attr_show,
27343 };
27344
27345 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.h linux-2.6.32.41/drivers/dma/ioat/dma.h
27346 --- linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27347 +++ linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27348 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27349 unsigned long *phys_complete);
27350 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27351 void ioat_kobject_del(struct ioatdma_device *device);
27352 -extern struct sysfs_ops ioat_sysfs_ops;
27353 +extern const struct sysfs_ops ioat_sysfs_ops;
27354 extern struct ioat_sysfs_entry ioat_version_attr;
27355 extern struct ioat_sysfs_entry ioat_cap_attr;
27356 #endif /* IOATDMA_H */
27357 diff -urNp linux-2.6.32.41/drivers/edac/edac_device_sysfs.c linux-2.6.32.41/drivers/edac/edac_device_sysfs.c
27358 --- linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27359 +++ linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27360 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27361 }
27362
27363 /* edac_dev file operations for an 'ctl_info' */
27364 -static struct sysfs_ops device_ctl_info_ops = {
27365 +static const struct sysfs_ops device_ctl_info_ops = {
27366 .show = edac_dev_ctl_info_show,
27367 .store = edac_dev_ctl_info_store
27368 };
27369 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27370 }
27371
27372 /* edac_dev file operations for an 'instance' */
27373 -static struct sysfs_ops device_instance_ops = {
27374 +static const struct sysfs_ops device_instance_ops = {
27375 .show = edac_dev_instance_show,
27376 .store = edac_dev_instance_store
27377 };
27378 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27379 }
27380
27381 /* edac_dev file operations for a 'block' */
27382 -static struct sysfs_ops device_block_ops = {
27383 +static const struct sysfs_ops device_block_ops = {
27384 .show = edac_dev_block_show,
27385 .store = edac_dev_block_store
27386 };
27387 diff -urNp linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c
27388 --- linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27389 +++ linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27390 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27391 return -EIO;
27392 }
27393
27394 -static struct sysfs_ops csrowfs_ops = {
27395 +static const struct sysfs_ops csrowfs_ops = {
27396 .show = csrowdev_show,
27397 .store = csrowdev_store
27398 };
27399 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27400 }
27401
27402 /* Intermediate show/store table */
27403 -static struct sysfs_ops mci_ops = {
27404 +static const struct sysfs_ops mci_ops = {
27405 .show = mcidev_show,
27406 .store = mcidev_store
27407 };
27408 diff -urNp linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c
27409 --- linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27410 +++ linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27411 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27412 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27413 static int edac_pci_poll_msec = 1000; /* one second workq period */
27414
27415 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27416 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27417 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27418 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27419
27420 static struct kobject *edac_pci_top_main_kobj;
27421 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27422 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27423 }
27424
27425 /* fs_ops table */
27426 -static struct sysfs_ops pci_instance_ops = {
27427 +static const struct sysfs_ops pci_instance_ops = {
27428 .show = edac_pci_instance_show,
27429 .store = edac_pci_instance_store
27430 };
27431 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27432 return -EIO;
27433 }
27434
27435 -static struct sysfs_ops edac_pci_sysfs_ops = {
27436 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27437 .show = edac_pci_dev_show,
27438 .store = edac_pci_dev_store
27439 };
27440 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27441 edac_printk(KERN_CRIT, EDAC_PCI,
27442 "Signaled System Error on %s\n",
27443 pci_name(dev));
27444 - atomic_inc(&pci_nonparity_count);
27445 + atomic_inc_unchecked(&pci_nonparity_count);
27446 }
27447
27448 if (status & (PCI_STATUS_PARITY)) {
27449 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27450 "Master Data Parity Error on %s\n",
27451 pci_name(dev));
27452
27453 - atomic_inc(&pci_parity_count);
27454 + atomic_inc_unchecked(&pci_parity_count);
27455 }
27456
27457 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27458 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27459 "Detected Parity Error on %s\n",
27460 pci_name(dev));
27461
27462 - atomic_inc(&pci_parity_count);
27463 + atomic_inc_unchecked(&pci_parity_count);
27464 }
27465 }
27466
27467 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27468 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27469 "Signaled System Error on %s\n",
27470 pci_name(dev));
27471 - atomic_inc(&pci_nonparity_count);
27472 + atomic_inc_unchecked(&pci_nonparity_count);
27473 }
27474
27475 if (status & (PCI_STATUS_PARITY)) {
27476 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27477 "Master Data Parity Error on "
27478 "%s\n", pci_name(dev));
27479
27480 - atomic_inc(&pci_parity_count);
27481 + atomic_inc_unchecked(&pci_parity_count);
27482 }
27483
27484 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27485 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27486 "Detected Parity Error on %s\n",
27487 pci_name(dev));
27488
27489 - atomic_inc(&pci_parity_count);
27490 + atomic_inc_unchecked(&pci_parity_count);
27491 }
27492 }
27493 }
27494 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27495 if (!check_pci_errors)
27496 return;
27497
27498 - before_count = atomic_read(&pci_parity_count);
27499 + before_count = atomic_read_unchecked(&pci_parity_count);
27500
27501 /* scan all PCI devices looking for a Parity Error on devices and
27502 * bridges.
27503 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27504 /* Only if operator has selected panic on PCI Error */
27505 if (edac_pci_get_panic_on_pe()) {
27506 /* If the count is different 'after' from 'before' */
27507 - if (before_count != atomic_read(&pci_parity_count))
27508 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27509 panic("EDAC: PCI Parity Error");
27510 }
27511 }
27512 diff -urNp linux-2.6.32.41/drivers/firewire/core-cdev.c linux-2.6.32.41/drivers/firewire/core-cdev.c
27513 --- linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27514 +++ linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27515 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27516 int ret;
27517
27518 if ((request->channels == 0 && request->bandwidth == 0) ||
27519 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27520 - request->bandwidth < 0)
27521 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27522 return -EINVAL;
27523
27524 r = kmalloc(sizeof(*r), GFP_KERNEL);
27525 diff -urNp linux-2.6.32.41/drivers/firewire/core-transaction.c linux-2.6.32.41/drivers/firewire/core-transaction.c
27526 --- linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27527 +++ linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27528 @@ -36,6 +36,7 @@
27529 #include <linux/string.h>
27530 #include <linux/timer.h>
27531 #include <linux/types.h>
27532 +#include <linux/sched.h>
27533
27534 #include <asm/byteorder.h>
27535
27536 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27537 struct transaction_callback_data d;
27538 struct fw_transaction t;
27539
27540 + pax_track_stack();
27541 +
27542 init_completion(&d.done);
27543 d.payload = payload;
27544 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27545 diff -urNp linux-2.6.32.41/drivers/firmware/dmi_scan.c linux-2.6.32.41/drivers/firmware/dmi_scan.c
27546 --- linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27547 +++ linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27548 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27549 }
27550 }
27551 else {
27552 - /*
27553 - * no iounmap() for that ioremap(); it would be a no-op, but
27554 - * it's so early in setup that sucker gets confused into doing
27555 - * what it shouldn't if we actually call it.
27556 - */
27557 p = dmi_ioremap(0xF0000, 0x10000);
27558 if (p == NULL)
27559 goto error;
27560 diff -urNp linux-2.6.32.41/drivers/firmware/edd.c linux-2.6.32.41/drivers/firmware/edd.c
27561 --- linux-2.6.32.41/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27562 +++ linux-2.6.32.41/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27563 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27564 return ret;
27565 }
27566
27567 -static struct sysfs_ops edd_attr_ops = {
27568 +static const struct sysfs_ops edd_attr_ops = {
27569 .show = edd_attr_show,
27570 };
27571
27572 diff -urNp linux-2.6.32.41/drivers/firmware/efivars.c linux-2.6.32.41/drivers/firmware/efivars.c
27573 --- linux-2.6.32.41/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27574 +++ linux-2.6.32.41/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27575 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27576 return ret;
27577 }
27578
27579 -static struct sysfs_ops efivar_attr_ops = {
27580 +static const struct sysfs_ops efivar_attr_ops = {
27581 .show = efivar_attr_show,
27582 .store = efivar_attr_store,
27583 };
27584 diff -urNp linux-2.6.32.41/drivers/firmware/iscsi_ibft.c linux-2.6.32.41/drivers/firmware/iscsi_ibft.c
27585 --- linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27586 +++ linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27587 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27588 return ret;
27589 }
27590
27591 -static struct sysfs_ops ibft_attr_ops = {
27592 +static const struct sysfs_ops ibft_attr_ops = {
27593 .show = ibft_show_attribute,
27594 };
27595
27596 diff -urNp linux-2.6.32.41/drivers/firmware/memmap.c linux-2.6.32.41/drivers/firmware/memmap.c
27597 --- linux-2.6.32.41/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27598 +++ linux-2.6.32.41/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27599 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27600 NULL
27601 };
27602
27603 -static struct sysfs_ops memmap_attr_ops = {
27604 +static const struct sysfs_ops memmap_attr_ops = {
27605 .show = memmap_attr_show,
27606 };
27607
27608 diff -urNp linux-2.6.32.41/drivers/gpio/vr41xx_giu.c linux-2.6.32.41/drivers/gpio/vr41xx_giu.c
27609 --- linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27610 +++ linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27611 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27612 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27613 maskl, pendl, maskh, pendh);
27614
27615 - atomic_inc(&irq_err_count);
27616 + atomic_inc_unchecked(&irq_err_count);
27617
27618 return -EINVAL;
27619 }
27620 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c
27621 --- linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27622 +++ linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27623 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27624 struct drm_crtc *tmp;
27625 int crtc_mask = 1;
27626
27627 - WARN(!crtc, "checking null crtc?");
27628 + BUG_ON(!crtc);
27629
27630 dev = crtc->dev;
27631
27632 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27633
27634 adjusted_mode = drm_mode_duplicate(dev, mode);
27635
27636 + pax_track_stack();
27637 +
27638 crtc->enabled = drm_helper_crtc_in_use(crtc);
27639
27640 if (!crtc->enabled)
27641 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_drv.c linux-2.6.32.41/drivers/gpu/drm/drm_drv.c
27642 --- linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27643 +++ linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27644 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27645 char *kdata = NULL;
27646
27647 atomic_inc(&dev->ioctl_count);
27648 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27649 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27650 ++file_priv->ioctl_count;
27651
27652 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27653 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_fops.c linux-2.6.32.41/drivers/gpu/drm/drm_fops.c
27654 --- linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27655 +++ linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27656 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27657 }
27658
27659 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27660 - atomic_set(&dev->counts[i], 0);
27661 + atomic_set_unchecked(&dev->counts[i], 0);
27662
27663 dev->sigdata.lock = NULL;
27664
27665 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27666
27667 retcode = drm_open_helper(inode, filp, dev);
27668 if (!retcode) {
27669 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27670 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27671 spin_lock(&dev->count_lock);
27672 - if (!dev->open_count++) {
27673 + if (local_inc_return(&dev->open_count) == 1) {
27674 spin_unlock(&dev->count_lock);
27675 retcode = drm_setup(dev);
27676 goto out;
27677 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27678
27679 lock_kernel();
27680
27681 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27682 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27683
27684 if (dev->driver->preclose)
27685 dev->driver->preclose(dev, file_priv);
27686 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27687 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27688 task_pid_nr(current),
27689 (long)old_encode_dev(file_priv->minor->device),
27690 - dev->open_count);
27691 + local_read(&dev->open_count));
27692
27693 /* if the master has gone away we can't do anything with the lock */
27694 if (file_priv->minor->master)
27695 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27696 * End inline drm_release
27697 */
27698
27699 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27700 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27701 spin_lock(&dev->count_lock);
27702 - if (!--dev->open_count) {
27703 + if (local_dec_and_test(&dev->open_count)) {
27704 if (atomic_read(&dev->ioctl_count)) {
27705 DRM_ERROR("Device busy: %d\n",
27706 atomic_read(&dev->ioctl_count));
27707 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_gem.c linux-2.6.32.41/drivers/gpu/drm/drm_gem.c
27708 --- linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27709 +++ linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27710 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27711 spin_lock_init(&dev->object_name_lock);
27712 idr_init(&dev->object_name_idr);
27713 atomic_set(&dev->object_count, 0);
27714 - atomic_set(&dev->object_memory, 0);
27715 + atomic_set_unchecked(&dev->object_memory, 0);
27716 atomic_set(&dev->pin_count, 0);
27717 - atomic_set(&dev->pin_memory, 0);
27718 + atomic_set_unchecked(&dev->pin_memory, 0);
27719 atomic_set(&dev->gtt_count, 0);
27720 - atomic_set(&dev->gtt_memory, 0);
27721 + atomic_set_unchecked(&dev->gtt_memory, 0);
27722
27723 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27724 if (!mm) {
27725 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27726 goto fput;
27727 }
27728 atomic_inc(&dev->object_count);
27729 - atomic_add(obj->size, &dev->object_memory);
27730 + atomic_add_unchecked(obj->size, &dev->object_memory);
27731 return obj;
27732 fput:
27733 fput(obj->filp);
27734 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27735
27736 fput(obj->filp);
27737 atomic_dec(&dev->object_count);
27738 - atomic_sub(obj->size, &dev->object_memory);
27739 + atomic_sub_unchecked(obj->size, &dev->object_memory);
27740 kfree(obj);
27741 }
27742 EXPORT_SYMBOL(drm_gem_object_free);
27743 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_info.c linux-2.6.32.41/drivers/gpu/drm/drm_info.c
27744 --- linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27745 +++ linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27746 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27747 struct drm_local_map *map;
27748 struct drm_map_list *r_list;
27749
27750 - /* Hardcoded from _DRM_FRAME_BUFFER,
27751 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27752 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27753 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27754 + static const char * const types[] = {
27755 + [_DRM_FRAME_BUFFER] = "FB",
27756 + [_DRM_REGISTERS] = "REG",
27757 + [_DRM_SHM] = "SHM",
27758 + [_DRM_AGP] = "AGP",
27759 + [_DRM_SCATTER_GATHER] = "SG",
27760 + [_DRM_CONSISTENT] = "PCI",
27761 + [_DRM_GEM] = "GEM" };
27762 const char *type;
27763 int i;
27764
27765 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27766 map = r_list->map;
27767 if (!map)
27768 continue;
27769 - if (map->type < 0 || map->type > 5)
27770 + if (map->type >= ARRAY_SIZE(types))
27771 type = "??";
27772 else
27773 type = types[map->type];
27774 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27775 struct drm_device *dev = node->minor->dev;
27776
27777 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27778 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27779 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27780 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27781 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27782 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27783 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27784 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27785 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27786 return 0;
27787 }
27788 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27789 mutex_lock(&dev->struct_mutex);
27790 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27791 atomic_read(&dev->vma_count),
27792 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27793 + NULL, 0);
27794 +#else
27795 high_memory, (u64)virt_to_phys(high_memory));
27796 +#endif
27797
27798 list_for_each_entry(pt, &dev->vmalist, head) {
27799 vma = pt->vma;
27800 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27801 continue;
27802 seq_printf(m,
27803 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27804 - pt->pid, vma->vm_start, vma->vm_end,
27805 + pt->pid,
27806 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27807 + 0, 0,
27808 +#else
27809 + vma->vm_start, vma->vm_end,
27810 +#endif
27811 vma->vm_flags & VM_READ ? 'r' : '-',
27812 vma->vm_flags & VM_WRITE ? 'w' : '-',
27813 vma->vm_flags & VM_EXEC ? 'x' : '-',
27814 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27815 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27816 vma->vm_flags & VM_IO ? 'i' : '-',
27817 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27818 + 0);
27819 +#else
27820 vma->vm_pgoff);
27821 +#endif
27822
27823 #if defined(__i386__)
27824 pgprot = pgprot_val(vma->vm_page_prot);
27825 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c
27826 --- linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27827 +++ linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27828 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27829 stats->data[i].value =
27830 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27831 else
27832 - stats->data[i].value = atomic_read(&dev->counts[i]);
27833 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27834 stats->data[i].type = dev->types[i];
27835 }
27836
27837 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_lock.c linux-2.6.32.41/drivers/gpu/drm/drm_lock.c
27838 --- linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
27839 +++ linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
27840 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
27841 if (drm_lock_take(&master->lock, lock->context)) {
27842 master->lock.file_priv = file_priv;
27843 master->lock.lock_time = jiffies;
27844 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27845 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27846 break; /* Got lock */
27847 }
27848
27849 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
27850 return -EINVAL;
27851 }
27852
27853 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27854 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27855
27856 /* kernel_context_switch isn't used by any of the x86 drm
27857 * modules but is required by the Sparc driver.
27858 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c
27859 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
27860 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
27861 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
27862 dma->buflist[vertex->idx],
27863 vertex->discard, vertex->used);
27864
27865 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27866 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27867 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27868 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27869 sarea_priv->last_enqueue = dev_priv->counter - 1;
27870 sarea_priv->last_dispatch = (int)hw_status[5];
27871
27872 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
27873 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
27874 mc->last_render);
27875
27876 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27877 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27878 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27879 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27880 sarea_priv->last_enqueue = dev_priv->counter - 1;
27881 sarea_priv->last_dispatch = (int)hw_status[5];
27882
27883 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h
27884 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
27885 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
27886 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
27887 int page_flipping;
27888
27889 wait_queue_head_t irq_queue;
27890 - atomic_t irq_received;
27891 - atomic_t irq_emitted;
27892 + atomic_unchecked_t irq_received;
27893 + atomic_unchecked_t irq_emitted;
27894
27895 int front_offset;
27896 } drm_i810_private_t;
27897 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h
27898 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
27899 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
27900 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
27901 int page_flipping;
27902
27903 wait_queue_head_t irq_queue;
27904 - atomic_t irq_received;
27905 - atomic_t irq_emitted;
27906 + atomic_unchecked_t irq_received;
27907 + atomic_unchecked_t irq_emitted;
27908
27909 int use_mi_batchbuffer_start;
27910
27911 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c
27912 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
27913 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
27914 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
27915
27916 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
27917
27918 - atomic_inc(&dev_priv->irq_received);
27919 + atomic_inc_unchecked(&dev_priv->irq_received);
27920 wake_up_interruptible(&dev_priv->irq_queue);
27921
27922 return IRQ_HANDLED;
27923 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
27924
27925 DRM_DEBUG("%s\n", __func__);
27926
27927 - atomic_inc(&dev_priv->irq_emitted);
27928 + atomic_inc_unchecked(&dev_priv->irq_emitted);
27929
27930 BEGIN_LP_RING(2);
27931 OUT_RING(0);
27932 OUT_RING(GFX_OP_USER_INTERRUPT);
27933 ADVANCE_LP_RING();
27934
27935 - return atomic_read(&dev_priv->irq_emitted);
27936 + return atomic_read_unchecked(&dev_priv->irq_emitted);
27937 }
27938
27939 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
27940 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
27941
27942 DRM_DEBUG("%s\n", __func__);
27943
27944 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27945 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27946 return 0;
27947
27948 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
27949 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
27950
27951 for (;;) {
27952 __set_current_state(TASK_INTERRUPTIBLE);
27953 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27954 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27955 break;
27956 if ((signed)(end - jiffies) <= 0) {
27957 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
27958 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
27959 I830_WRITE16(I830REG_HWSTAM, 0xffff);
27960 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
27961 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
27962 - atomic_set(&dev_priv->irq_received, 0);
27963 - atomic_set(&dev_priv->irq_emitted, 0);
27964 + atomic_set_unchecked(&dev_priv->irq_received, 0);
27965 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
27966 init_waitqueue_head(&dev_priv->irq_queue);
27967 }
27968
27969 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c
27970 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
27971 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
27972 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
27973 }
27974 }
27975
27976 -struct intel_dvo_dev_ops ch7017_ops = {
27977 +const struct intel_dvo_dev_ops ch7017_ops = {
27978 .init = ch7017_init,
27979 .detect = ch7017_detect,
27980 .mode_valid = ch7017_mode_valid,
27981 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c
27982 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
27983 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
27984 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
27985 }
27986 }
27987
27988 -struct intel_dvo_dev_ops ch7xxx_ops = {
27989 +const struct intel_dvo_dev_ops ch7xxx_ops = {
27990 .init = ch7xxx_init,
27991 .detect = ch7xxx_detect,
27992 .mode_valid = ch7xxx_mode_valid,
27993 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h
27994 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
27995 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
27996 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
27997 *
27998 * \return singly-linked list of modes or NULL if no modes found.
27999 */
28000 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28001 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28002
28003 /**
28004 * Clean up driver-specific bits of the output
28005 */
28006 - void (*destroy) (struct intel_dvo_device *dvo);
28007 + void (* const destroy) (struct intel_dvo_device *dvo);
28008
28009 /**
28010 * Debugging hook to dump device registers to log file
28011 */
28012 - void (*dump_regs)(struct intel_dvo_device *dvo);
28013 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28014 };
28015
28016 -extern struct intel_dvo_dev_ops sil164_ops;
28017 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28018 -extern struct intel_dvo_dev_ops ivch_ops;
28019 -extern struct intel_dvo_dev_ops tfp410_ops;
28020 -extern struct intel_dvo_dev_ops ch7017_ops;
28021 +extern const struct intel_dvo_dev_ops sil164_ops;
28022 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28023 +extern const struct intel_dvo_dev_ops ivch_ops;
28024 +extern const struct intel_dvo_dev_ops tfp410_ops;
28025 +extern const struct intel_dvo_dev_ops ch7017_ops;
28026
28027 #endif /* _INTEL_DVO_H */
28028 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c
28029 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28030 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28031 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28032 }
28033 }
28034
28035 -struct intel_dvo_dev_ops ivch_ops= {
28036 +const struct intel_dvo_dev_ops ivch_ops= {
28037 .init = ivch_init,
28038 .dpms = ivch_dpms,
28039 .save = ivch_save,
28040 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c
28041 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28042 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28043 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28044 }
28045 }
28046
28047 -struct intel_dvo_dev_ops sil164_ops = {
28048 +const struct intel_dvo_dev_ops sil164_ops = {
28049 .init = sil164_init,
28050 .detect = sil164_detect,
28051 .mode_valid = sil164_mode_valid,
28052 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c
28053 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28054 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28055 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28056 }
28057 }
28058
28059 -struct intel_dvo_dev_ops tfp410_ops = {
28060 +const struct intel_dvo_dev_ops tfp410_ops = {
28061 .init = tfp410_init,
28062 .detect = tfp410_detect,
28063 .mode_valid = tfp410_mode_valid,
28064 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c
28065 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28066 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28067 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28068 I915_READ(GTIMR));
28069 }
28070 seq_printf(m, "Interrupts received: %d\n",
28071 - atomic_read(&dev_priv->irq_received));
28072 + atomic_read_unchecked(&dev_priv->irq_received));
28073 if (dev_priv->hw_status_page != NULL) {
28074 seq_printf(m, "Current sequence: %d\n",
28075 i915_get_gem_seqno(dev));
28076 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c
28077 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28078 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28079 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28080 return i915_resume(dev);
28081 }
28082
28083 -static struct vm_operations_struct i915_gem_vm_ops = {
28084 +static const struct vm_operations_struct i915_gem_vm_ops = {
28085 .fault = i915_gem_fault,
28086 .open = drm_gem_vm_open,
28087 .close = drm_gem_vm_close,
28088 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h
28089 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28090 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28091 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28092 int page_flipping;
28093
28094 wait_queue_head_t irq_queue;
28095 - atomic_t irq_received;
28096 + atomic_unchecked_t irq_received;
28097 /** Protects user_irq_refcount and irq_mask_reg */
28098 spinlock_t user_irq_lock;
28099 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28100 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c
28101 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28102 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28103 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28104
28105 args->aper_size = dev->gtt_total;
28106 args->aper_available_size = (args->aper_size -
28107 - atomic_read(&dev->pin_memory));
28108 + atomic_read_unchecked(&dev->pin_memory));
28109
28110 return 0;
28111 }
28112 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28113 return -EINVAL;
28114 }
28115
28116 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28117 + drm_gem_object_unreference(obj);
28118 + return -EFAULT;
28119 + }
28120 +
28121 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28122 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28123 } else {
28124 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28125 return -EINVAL;
28126 }
28127
28128 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28129 + drm_gem_object_unreference(obj);
28130 + return -EFAULT;
28131 + }
28132 +
28133 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28134 * it would end up going through the fenced access, and we'll get
28135 * different detiling behavior between reading and writing.
28136 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28137
28138 if (obj_priv->gtt_space) {
28139 atomic_dec(&dev->gtt_count);
28140 - atomic_sub(obj->size, &dev->gtt_memory);
28141 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28142
28143 drm_mm_put_block(obj_priv->gtt_space);
28144 obj_priv->gtt_space = NULL;
28145 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28146 goto search_free;
28147 }
28148 atomic_inc(&dev->gtt_count);
28149 - atomic_add(obj->size, &dev->gtt_memory);
28150 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28151
28152 /* Assert that the object is not currently in any GPU domain. As it
28153 * wasn't in the GTT, there shouldn't be any way it could have been in
28154 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28155 "%d/%d gtt bytes\n",
28156 atomic_read(&dev->object_count),
28157 atomic_read(&dev->pin_count),
28158 - atomic_read(&dev->object_memory),
28159 - atomic_read(&dev->pin_memory),
28160 - atomic_read(&dev->gtt_memory),
28161 + atomic_read_unchecked(&dev->object_memory),
28162 + atomic_read_unchecked(&dev->pin_memory),
28163 + atomic_read_unchecked(&dev->gtt_memory),
28164 dev->gtt_total);
28165 }
28166 goto err;
28167 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28168 */
28169 if (obj_priv->pin_count == 1) {
28170 atomic_inc(&dev->pin_count);
28171 - atomic_add(obj->size, &dev->pin_memory);
28172 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28173 if (!obj_priv->active &&
28174 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28175 !list_empty(&obj_priv->list))
28176 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28177 list_move_tail(&obj_priv->list,
28178 &dev_priv->mm.inactive_list);
28179 atomic_dec(&dev->pin_count);
28180 - atomic_sub(obj->size, &dev->pin_memory);
28181 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28182 }
28183 i915_verify_inactive(dev, __FILE__, __LINE__);
28184 }
28185 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c
28186 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28187 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28188 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28189 int irq_received;
28190 int ret = IRQ_NONE;
28191
28192 - atomic_inc(&dev_priv->irq_received);
28193 + atomic_inc_unchecked(&dev_priv->irq_received);
28194
28195 if (IS_IGDNG(dev))
28196 return igdng_irq_handler(dev);
28197 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28198 {
28199 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28200
28201 - atomic_set(&dev_priv->irq_received, 0);
28202 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28203
28204 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28205 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28206 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h
28207 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28208 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28209 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28210 u32 clear_cmd;
28211 u32 maccess;
28212
28213 - atomic_t vbl_received; /**< Number of vblanks received. */
28214 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28215 wait_queue_head_t fence_queue;
28216 - atomic_t last_fence_retired;
28217 + atomic_unchecked_t last_fence_retired;
28218 u32 next_fence_to_post;
28219
28220 unsigned int fb_cpp;
28221 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c
28222 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28223 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28224 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28225 if (crtc != 0)
28226 return 0;
28227
28228 - return atomic_read(&dev_priv->vbl_received);
28229 + return atomic_read_unchecked(&dev_priv->vbl_received);
28230 }
28231
28232
28233 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28234 /* VBLANK interrupt */
28235 if (status & MGA_VLINEPEN) {
28236 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28237 - atomic_inc(&dev_priv->vbl_received);
28238 + atomic_inc_unchecked(&dev_priv->vbl_received);
28239 drm_handle_vblank(dev, 0);
28240 handled = 1;
28241 }
28242 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28243 MGA_WRITE(MGA_PRIMEND, prim_end);
28244 }
28245
28246 - atomic_inc(&dev_priv->last_fence_retired);
28247 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28248 DRM_WAKEUP(&dev_priv->fence_queue);
28249 handled = 1;
28250 }
28251 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28252 * using fences.
28253 */
28254 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28255 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28256 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28257 - *sequence) <= (1 << 23)));
28258
28259 *sequence = cur_fence;
28260 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c
28261 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28262 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28263 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28264
28265 /* GH: Simple idle check.
28266 */
28267 - atomic_set(&dev_priv->idle_count, 0);
28268 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28269
28270 /* We don't support anything other than bus-mastering ring mode,
28271 * but the ring can be in either AGP or PCI space for the ring
28272 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h
28273 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28274 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28275 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28276 int is_pci;
28277 unsigned long cce_buffers_offset;
28278
28279 - atomic_t idle_count;
28280 + atomic_unchecked_t idle_count;
28281
28282 int page_flipping;
28283 int current_page;
28284 u32 crtc_offset;
28285 u32 crtc_offset_cntl;
28286
28287 - atomic_t vbl_received;
28288 + atomic_unchecked_t vbl_received;
28289
28290 u32 color_fmt;
28291 unsigned int front_offset;
28292 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c
28293 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28294 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28295 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28296 if (crtc != 0)
28297 return 0;
28298
28299 - return atomic_read(&dev_priv->vbl_received);
28300 + return atomic_read_unchecked(&dev_priv->vbl_received);
28301 }
28302
28303 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28304 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28305 /* VBLANK interrupt */
28306 if (status & R128_CRTC_VBLANK_INT) {
28307 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28308 - atomic_inc(&dev_priv->vbl_received);
28309 + atomic_inc_unchecked(&dev_priv->vbl_received);
28310 drm_handle_vblank(dev, 0);
28311 return IRQ_HANDLED;
28312 }
28313 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c
28314 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28315 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28316 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28317
28318 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28319 {
28320 - if (atomic_read(&dev_priv->idle_count) == 0) {
28321 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28322 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28323 } else {
28324 - atomic_set(&dev_priv->idle_count, 0);
28325 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28326 }
28327 }
28328
28329 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c
28330 --- linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28331 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28332 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28333 char name[512];
28334 int i;
28335
28336 + pax_track_stack();
28337 +
28338 ctx->card = card;
28339 ctx->bios = bios;
28340
28341 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c
28342 --- linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28343 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28344 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28345 regex_t mask_rex;
28346 regmatch_t match[4];
28347 char buf[1024];
28348 - size_t end;
28349 + long end;
28350 int len;
28351 int done = 0;
28352 int r;
28353 unsigned o;
28354 struct offset *offset;
28355 char last_reg_s[10];
28356 - int last_reg;
28357 + unsigned long last_reg;
28358
28359 if (regcomp
28360 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28361 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c
28362 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28363 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28364 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28365 bool linkb;
28366 struct radeon_i2c_bus_rec ddc_bus;
28367
28368 + pax_track_stack();
28369 +
28370 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28371
28372 if (data_offset == 0)
28373 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28374 }
28375 }
28376
28377 -struct bios_connector {
28378 +static struct bios_connector {
28379 bool valid;
28380 uint16_t line_mux;
28381 uint16_t devices;
28382 int connector_type;
28383 struct radeon_i2c_bus_rec ddc_bus;
28384 -};
28385 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28386
28387 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28388 drm_device
28389 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28390 uint8_t dac;
28391 union atom_supported_devices *supported_devices;
28392 int i, j;
28393 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28394
28395 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28396
28397 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c
28398 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28399 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28400 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28401
28402 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28403 error = freq - current_freq;
28404 - error = error < 0 ? 0xffffffff : error;
28405 + error = (int32_t)error < 0 ? 0xffffffff : error;
28406 } else
28407 error = abs(current_freq - freq);
28408 vco_diff = abs(vco - best_vco);
28409 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h
28410 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28411 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28412 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28413
28414 /* SW interrupt */
28415 wait_queue_head_t swi_queue;
28416 - atomic_t swi_emitted;
28417 + atomic_unchecked_t swi_emitted;
28418 int vblank_crtc;
28419 uint32_t irq_enable_reg;
28420 uint32_t r500_disp_irq_reg;
28421 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c
28422 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28423 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28424 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28425 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28426 return 0;
28427 }
28428 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28429 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28430 if (!rdev->cp.ready) {
28431 /* FIXME: cp is not running assume everythings is done right
28432 * away
28433 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28434 return r;
28435 }
28436 WREG32(rdev->fence_drv.scratch_reg, 0);
28437 - atomic_set(&rdev->fence_drv.seq, 0);
28438 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28439 INIT_LIST_HEAD(&rdev->fence_drv.created);
28440 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28441 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28442 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h
28443 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28444 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28445 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28446 */
28447 struct radeon_fence_driver {
28448 uint32_t scratch_reg;
28449 - atomic_t seq;
28450 + atomic_unchecked_t seq;
28451 uint32_t last_seq;
28452 unsigned long count_timeout;
28453 wait_queue_head_t queue;
28454 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c
28455 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28456 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28457 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28458 request = compat_alloc_user_space(sizeof(*request));
28459 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28460 || __put_user(req32.param, &request->param)
28461 - || __put_user((void __user *)(unsigned long)req32.value,
28462 + || __put_user((unsigned long)req32.value,
28463 &request->value))
28464 return -EFAULT;
28465
28466 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c
28467 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28468 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28469 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28470 unsigned int ret;
28471 RING_LOCALS;
28472
28473 - atomic_inc(&dev_priv->swi_emitted);
28474 - ret = atomic_read(&dev_priv->swi_emitted);
28475 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28476 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28477
28478 BEGIN_RING(4);
28479 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28480 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28481 drm_radeon_private_t *dev_priv =
28482 (drm_radeon_private_t *) dev->dev_private;
28483
28484 - atomic_set(&dev_priv->swi_emitted, 0);
28485 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28486 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28487
28488 dev->max_vblank_count = 0x001fffff;
28489 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c
28490 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28491 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28492 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28493 {
28494 drm_radeon_private_t *dev_priv = dev->dev_private;
28495 drm_radeon_getparam_t *param = data;
28496 - int value;
28497 + int value = 0;
28498
28499 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28500
28501 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c
28502 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28503 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28504 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28505 DRM_INFO("radeon: ttm finalized\n");
28506 }
28507
28508 -static struct vm_operations_struct radeon_ttm_vm_ops;
28509 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28510 -
28511 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28512 -{
28513 - struct ttm_buffer_object *bo;
28514 - int r;
28515 -
28516 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28517 - if (bo == NULL) {
28518 - return VM_FAULT_NOPAGE;
28519 - }
28520 - r = ttm_vm_ops->fault(vma, vmf);
28521 - return r;
28522 -}
28523 -
28524 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28525 {
28526 struct drm_file *file_priv;
28527 struct radeon_device *rdev;
28528 - int r;
28529
28530 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28531 return drm_mmap(filp, vma);
28532 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28533
28534 file_priv = (struct drm_file *)filp->private_data;
28535 rdev = file_priv->minor->dev->dev_private;
28536 - if (rdev == NULL) {
28537 + if (!rdev)
28538 return -EINVAL;
28539 - }
28540 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28541 - if (unlikely(r != 0)) {
28542 - return r;
28543 - }
28544 - if (unlikely(ttm_vm_ops == NULL)) {
28545 - ttm_vm_ops = vma->vm_ops;
28546 - radeon_ttm_vm_ops = *ttm_vm_ops;
28547 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28548 - }
28549 - vma->vm_ops = &radeon_ttm_vm_ops;
28550 - return 0;
28551 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28552 }
28553
28554
28555 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c
28556 --- linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28557 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28558 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28559 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28560 rdev->pm.sideport_bandwidth.full)
28561 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28562 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28563 + read_delay_latency.full = rfixed_const(800 * 1000);
28564 read_delay_latency.full = rfixed_div(read_delay_latency,
28565 rdev->pm.igp_sideport_mclk);
28566 + a.full = rfixed_const(370);
28567 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28568 } else {
28569 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28570 rdev->pm.k8_bandwidth.full)
28571 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c
28572 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28573 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28574 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28575 NULL
28576 };
28577
28578 -static struct sysfs_ops ttm_bo_global_ops = {
28579 +static const struct sysfs_ops ttm_bo_global_ops = {
28580 .show = &ttm_bo_global_show
28581 };
28582
28583 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c
28584 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28585 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28586 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28587 {
28588 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28589 vma->vm_private_data;
28590 - struct ttm_bo_device *bdev = bo->bdev;
28591 + struct ttm_bo_device *bdev;
28592 unsigned long bus_base;
28593 unsigned long bus_offset;
28594 unsigned long bus_size;
28595 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28596 unsigned long address = (unsigned long)vmf->virtual_address;
28597 int retval = VM_FAULT_NOPAGE;
28598
28599 + if (!bo)
28600 + return VM_FAULT_NOPAGE;
28601 + bdev = bo->bdev;
28602 +
28603 /*
28604 * Work around locking order reversal in fault / nopfn
28605 * between mmap_sem and bo_reserve: Perform a trylock operation
28606 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c
28607 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28608 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28609 @@ -36,7 +36,7 @@
28610 struct ttm_global_item {
28611 struct mutex mutex;
28612 void *object;
28613 - int refcount;
28614 + atomic_t refcount;
28615 };
28616
28617 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28618 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28619 struct ttm_global_item *item = &glob[i];
28620 mutex_init(&item->mutex);
28621 item->object = NULL;
28622 - item->refcount = 0;
28623 + atomic_set(&item->refcount, 0);
28624 }
28625 }
28626
28627 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28628 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28629 struct ttm_global_item *item = &glob[i];
28630 BUG_ON(item->object != NULL);
28631 - BUG_ON(item->refcount != 0);
28632 + BUG_ON(atomic_read(&item->refcount) != 0);
28633 }
28634 }
28635
28636 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28637 void *object;
28638
28639 mutex_lock(&item->mutex);
28640 - if (item->refcount == 0) {
28641 + if (atomic_read(&item->refcount) == 0) {
28642 item->object = kzalloc(ref->size, GFP_KERNEL);
28643 if (unlikely(item->object == NULL)) {
28644 ret = -ENOMEM;
28645 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28646 goto out_err;
28647
28648 }
28649 - ++item->refcount;
28650 + atomic_inc(&item->refcount);
28651 ref->object = item->object;
28652 object = item->object;
28653 mutex_unlock(&item->mutex);
28654 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28655 struct ttm_global_item *item = &glob[ref->global_type];
28656
28657 mutex_lock(&item->mutex);
28658 - BUG_ON(item->refcount == 0);
28659 + BUG_ON(atomic_read(&item->refcount) == 0);
28660 BUG_ON(ref->object != item->object);
28661 - if (--item->refcount == 0) {
28662 + if (atomic_dec_and_test(&item->refcount)) {
28663 ref->release(ref);
28664 item->object = NULL;
28665 }
28666 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c
28667 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28668 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28669 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28670 NULL
28671 };
28672
28673 -static struct sysfs_ops ttm_mem_zone_ops = {
28674 +static const struct sysfs_ops ttm_mem_zone_ops = {
28675 .show = &ttm_mem_zone_show,
28676 .store = &ttm_mem_zone_store
28677 };
28678 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h
28679 --- linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28680 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28681 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28682 typedef uint32_t maskarray_t[5];
28683
28684 typedef struct drm_via_irq {
28685 - atomic_t irq_received;
28686 + atomic_unchecked_t irq_received;
28687 uint32_t pending_mask;
28688 uint32_t enable_mask;
28689 wait_queue_head_t irq_queue;
28690 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28691 struct timeval last_vblank;
28692 int last_vblank_valid;
28693 unsigned usec_per_vblank;
28694 - atomic_t vbl_received;
28695 + atomic_unchecked_t vbl_received;
28696 drm_via_state_t hc_state;
28697 char pci_buf[VIA_PCI_BUF_SIZE];
28698 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28699 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c
28700 --- linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28701 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28702 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28703 if (crtc != 0)
28704 return 0;
28705
28706 - return atomic_read(&dev_priv->vbl_received);
28707 + return atomic_read_unchecked(&dev_priv->vbl_received);
28708 }
28709
28710 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28711 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28712
28713 status = VIA_READ(VIA_REG_INTERRUPT);
28714 if (status & VIA_IRQ_VBLANK_PENDING) {
28715 - atomic_inc(&dev_priv->vbl_received);
28716 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28717 + atomic_inc_unchecked(&dev_priv->vbl_received);
28718 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28719 do_gettimeofday(&cur_vblank);
28720 if (dev_priv->last_vblank_valid) {
28721 dev_priv->usec_per_vblank =
28722 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28723 dev_priv->last_vblank = cur_vblank;
28724 dev_priv->last_vblank_valid = 1;
28725 }
28726 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28727 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28728 DRM_DEBUG("US per vblank is: %u\n",
28729 dev_priv->usec_per_vblank);
28730 }
28731 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28732
28733 for (i = 0; i < dev_priv->num_irqs; ++i) {
28734 if (status & cur_irq->pending_mask) {
28735 - atomic_inc(&cur_irq->irq_received);
28736 + atomic_inc_unchecked(&cur_irq->irq_received);
28737 DRM_WAKEUP(&cur_irq->irq_queue);
28738 handled = 1;
28739 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28740 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28741 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28742 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28743 masks[irq][4]));
28744 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28745 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28746 } else {
28747 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28748 (((cur_irq_sequence =
28749 - atomic_read(&cur_irq->irq_received)) -
28750 + atomic_read_unchecked(&cur_irq->irq_received)) -
28751 *sequence) <= (1 << 23)));
28752 }
28753 *sequence = cur_irq_sequence;
28754 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28755 }
28756
28757 for (i = 0; i < dev_priv->num_irqs; ++i) {
28758 - atomic_set(&cur_irq->irq_received, 0);
28759 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28760 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28761 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28762 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28763 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28764 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28765 case VIA_IRQ_RELATIVE:
28766 irqwait->request.sequence +=
28767 - atomic_read(&cur_irq->irq_received);
28768 + atomic_read_unchecked(&cur_irq->irq_received);
28769 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28770 case VIA_IRQ_ABSOLUTE:
28771 break;
28772 diff -urNp linux-2.6.32.41/drivers/hid/hid-core.c linux-2.6.32.41/drivers/hid/hid-core.c
28773 --- linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28774 +++ linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28775 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28776
28777 int hid_add_device(struct hid_device *hdev)
28778 {
28779 - static atomic_t id = ATOMIC_INIT(0);
28780 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28781 int ret;
28782
28783 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28784 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28785 /* XXX hack, any other cleaner solution after the driver core
28786 * is converted to allow more than 20 bytes as the device name? */
28787 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28788 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28789 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28790
28791 ret = device_add(&hdev->dev);
28792 if (!ret)
28793 diff -urNp linux-2.6.32.41/drivers/hid/usbhid/hiddev.c linux-2.6.32.41/drivers/hid/usbhid/hiddev.c
28794 --- linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28795 +++ linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28796 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28797 return put_user(HID_VERSION, (int __user *)arg);
28798
28799 case HIDIOCAPPLICATION:
28800 - if (arg < 0 || arg >= hid->maxapplication)
28801 + if (arg >= hid->maxapplication)
28802 return -EINVAL;
28803
28804 for (i = 0; i < hid->maxcollection; i++)
28805 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.c linux-2.6.32.41/drivers/hwmon/lis3lv02d.c
28806 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28807 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28808 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28809 * the lid is closed. This leads to interrupts as soon as a little move
28810 * is done.
28811 */
28812 - atomic_inc(&lis3_dev.count);
28813 + atomic_inc_unchecked(&lis3_dev.count);
28814
28815 wake_up_interruptible(&lis3_dev.misc_wait);
28816 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28817 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28818 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28819 return -EBUSY; /* already open */
28820
28821 - atomic_set(&lis3_dev.count, 0);
28822 + atomic_set_unchecked(&lis3_dev.count, 0);
28823
28824 /*
28825 * The sensor can generate interrupts for free-fall and direction
28826 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28827 add_wait_queue(&lis3_dev.misc_wait, &wait);
28828 while (true) {
28829 set_current_state(TASK_INTERRUPTIBLE);
28830 - data = atomic_xchg(&lis3_dev.count, 0);
28831 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28832 if (data)
28833 break;
28834
28835 @@ -244,7 +244,7 @@ out:
28836 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28837 {
28838 poll_wait(file, &lis3_dev.misc_wait, wait);
28839 - if (atomic_read(&lis3_dev.count))
28840 + if (atomic_read_unchecked(&lis3_dev.count))
28841 return POLLIN | POLLRDNORM;
28842 return 0;
28843 }
28844 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.h linux-2.6.32.41/drivers/hwmon/lis3lv02d.h
28845 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
28846 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
28847 @@ -201,7 +201,7 @@ struct lis3lv02d {
28848
28849 struct input_polled_dev *idev; /* input device */
28850 struct platform_device *pdev; /* platform device */
28851 - atomic_t count; /* interrupt count after last read */
28852 + atomic_unchecked_t count; /* interrupt count after last read */
28853 int xcalib; /* calibrated null value for x */
28854 int ycalib; /* calibrated null value for y */
28855 int zcalib; /* calibrated null value for z */
28856 diff -urNp linux-2.6.32.41/drivers/hwmon/sht15.c linux-2.6.32.41/drivers/hwmon/sht15.c
28857 --- linux-2.6.32.41/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
28858 +++ linux-2.6.32.41/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
28859 @@ -112,7 +112,7 @@ struct sht15_data {
28860 int supply_uV;
28861 int supply_uV_valid;
28862 struct work_struct update_supply_work;
28863 - atomic_t interrupt_handled;
28864 + atomic_unchecked_t interrupt_handled;
28865 };
28866
28867 /**
28868 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
28869 return ret;
28870
28871 gpio_direction_input(data->pdata->gpio_data);
28872 - atomic_set(&data->interrupt_handled, 0);
28873 + atomic_set_unchecked(&data->interrupt_handled, 0);
28874
28875 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28876 if (gpio_get_value(data->pdata->gpio_data) == 0) {
28877 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
28878 /* Only relevant if the interrupt hasn't occured. */
28879 - if (!atomic_read(&data->interrupt_handled))
28880 + if (!atomic_read_unchecked(&data->interrupt_handled))
28881 schedule_work(&data->read_work);
28882 }
28883 ret = wait_event_timeout(data->wait_queue,
28884 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
28885 struct sht15_data *data = d;
28886 /* First disable the interrupt */
28887 disable_irq_nosync(irq);
28888 - atomic_inc(&data->interrupt_handled);
28889 + atomic_inc_unchecked(&data->interrupt_handled);
28890 /* Then schedule a reading work struct */
28891 if (data->flag != SHT15_READING_NOTHING)
28892 schedule_work(&data->read_work);
28893 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
28894 here as could have gone low in meantime so verify
28895 it hasn't!
28896 */
28897 - atomic_set(&data->interrupt_handled, 0);
28898 + atomic_set_unchecked(&data->interrupt_handled, 0);
28899 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28900 /* If still not occured or another handler has been scheduled */
28901 if (gpio_get_value(data->pdata->gpio_data)
28902 - || atomic_read(&data->interrupt_handled))
28903 + || atomic_read_unchecked(&data->interrupt_handled))
28904 return;
28905 }
28906 /* Read the data back from the device */
28907 diff -urNp linux-2.6.32.41/drivers/hwmon/w83791d.c linux-2.6.32.41/drivers/hwmon/w83791d.c
28908 --- linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
28909 +++ linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
28910 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
28911 struct i2c_board_info *info);
28912 static int w83791d_remove(struct i2c_client *client);
28913
28914 -static int w83791d_read(struct i2c_client *client, u8 register);
28915 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
28916 +static int w83791d_read(struct i2c_client *client, u8 reg);
28917 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
28918 static struct w83791d_data *w83791d_update_device(struct device *dev);
28919
28920 #ifdef DEBUG
28921 diff -urNp linux-2.6.32.41/drivers/ide/ide-cd.c linux-2.6.32.41/drivers/ide/ide-cd.c
28922 --- linux-2.6.32.41/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
28923 +++ linux-2.6.32.41/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
28924 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
28925 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
28926 if ((unsigned long)buf & alignment
28927 || blk_rq_bytes(rq) & q->dma_pad_mask
28928 - || object_is_on_stack(buf))
28929 + || object_starts_on_stack(buf))
28930 drive->dma = 0;
28931 }
28932 }
28933 diff -urNp linux-2.6.32.41/drivers/ide/ide-floppy.c linux-2.6.32.41/drivers/ide/ide-floppy.c
28934 --- linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
28935 +++ linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
28936 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
28937 u8 pc_buf[256], header_len, desc_cnt;
28938 int i, rc = 1, blocks, length;
28939
28940 + pax_track_stack();
28941 +
28942 ide_debug_log(IDE_DBG_FUNC, "enter");
28943
28944 drive->bios_cyl = 0;
28945 diff -urNp linux-2.6.32.41/drivers/ide/setup-pci.c linux-2.6.32.41/drivers/ide/setup-pci.c
28946 --- linux-2.6.32.41/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
28947 +++ linux-2.6.32.41/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
28948 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28949 int ret, i, n_ports = dev2 ? 4 : 2;
28950 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28951
28952 + pax_track_stack();
28953 +
28954 for (i = 0; i < n_ports / 2; i++) {
28955 ret = ide_setup_pci_controller(pdev[i], d, !i);
28956 if (ret < 0)
28957 diff -urNp linux-2.6.32.41/drivers/ieee1394/dv1394.c linux-2.6.32.41/drivers/ieee1394/dv1394.c
28958 --- linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
28959 +++ linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
28960 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
28961 based upon DIF section and sequence
28962 */
28963
28964 -static void inline
28965 +static inline void
28966 frame_put_packet (struct frame *f, struct packet *p)
28967 {
28968 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
28969 diff -urNp linux-2.6.32.41/drivers/ieee1394/hosts.c linux-2.6.32.41/drivers/ieee1394/hosts.c
28970 --- linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
28971 +++ linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
28972 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
28973 }
28974
28975 static struct hpsb_host_driver dummy_driver = {
28976 + .name = "dummy",
28977 .transmit_packet = dummy_transmit_packet,
28978 .devctl = dummy_devctl,
28979 .isoctl = dummy_isoctl
28980 diff -urNp linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c
28981 --- linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
28982 +++ linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
28983 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
28984 for (func = 0; func < 8; func++) {
28985 u32 class = read_pci_config(num,slot,func,
28986 PCI_CLASS_REVISION);
28987 - if ((class == 0xffffffff))
28988 + if (class == 0xffffffff)
28989 continue; /* No device at this func */
28990
28991 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
28992 diff -urNp linux-2.6.32.41/drivers/ieee1394/ohci1394.c linux-2.6.32.41/drivers/ieee1394/ohci1394.c
28993 --- linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
28994 +++ linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
28995 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
28996 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
28997
28998 /* Module Parameters */
28999 -static int phys_dma = 1;
29000 +static int phys_dma;
29001 module_param(phys_dma, int, 0444);
29002 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29003 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29004
29005 static void dma_trm_tasklet(unsigned long data);
29006 static void dma_trm_reset(struct dma_trm_ctx *d);
29007 diff -urNp linux-2.6.32.41/drivers/ieee1394/sbp2.c linux-2.6.32.41/drivers/ieee1394/sbp2.c
29008 --- linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29009 +++ linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29010 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29011 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29012 MODULE_LICENSE("GPL");
29013
29014 -static int sbp2_module_init(void)
29015 +static int __init sbp2_module_init(void)
29016 {
29017 int ret;
29018
29019 diff -urNp linux-2.6.32.41/drivers/infiniband/core/cm.c linux-2.6.32.41/drivers/infiniband/core/cm.c
29020 --- linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29021 +++ linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29022 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29023
29024 struct cm_counter_group {
29025 struct kobject obj;
29026 - atomic_long_t counter[CM_ATTR_COUNT];
29027 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29028 };
29029
29030 struct cm_counter_attribute {
29031 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29032 struct ib_mad_send_buf *msg = NULL;
29033 int ret;
29034
29035 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29036 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29037 counter[CM_REQ_COUNTER]);
29038
29039 /* Quick state check to discard duplicate REQs. */
29040 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29041 if (!cm_id_priv)
29042 return;
29043
29044 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29045 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29046 counter[CM_REP_COUNTER]);
29047 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29048 if (ret)
29049 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29050 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29051 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29052 spin_unlock_irq(&cm_id_priv->lock);
29053 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29054 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29055 counter[CM_RTU_COUNTER]);
29056 goto out;
29057 }
29058 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29059 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29060 dreq_msg->local_comm_id);
29061 if (!cm_id_priv) {
29062 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29063 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29064 counter[CM_DREQ_COUNTER]);
29065 cm_issue_drep(work->port, work->mad_recv_wc);
29066 return -EINVAL;
29067 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29068 case IB_CM_MRA_REP_RCVD:
29069 break;
29070 case IB_CM_TIMEWAIT:
29071 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29072 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29073 counter[CM_DREQ_COUNTER]);
29074 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29075 goto unlock;
29076 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29077 cm_free_msg(msg);
29078 goto deref;
29079 case IB_CM_DREQ_RCVD:
29080 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29081 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29082 counter[CM_DREQ_COUNTER]);
29083 goto unlock;
29084 default:
29085 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29086 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29087 cm_id_priv->msg, timeout)) {
29088 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29089 - atomic_long_inc(&work->port->
29090 + atomic_long_inc_unchecked(&work->port->
29091 counter_group[CM_RECV_DUPLICATES].
29092 counter[CM_MRA_COUNTER]);
29093 goto out;
29094 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29095 break;
29096 case IB_CM_MRA_REQ_RCVD:
29097 case IB_CM_MRA_REP_RCVD:
29098 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29099 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29100 counter[CM_MRA_COUNTER]);
29101 /* fall through */
29102 default:
29103 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29104 case IB_CM_LAP_IDLE:
29105 break;
29106 case IB_CM_MRA_LAP_SENT:
29107 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29108 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29109 counter[CM_LAP_COUNTER]);
29110 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29111 goto unlock;
29112 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29113 cm_free_msg(msg);
29114 goto deref;
29115 case IB_CM_LAP_RCVD:
29116 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29117 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29118 counter[CM_LAP_COUNTER]);
29119 goto unlock;
29120 default:
29121 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29122 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29123 if (cur_cm_id_priv) {
29124 spin_unlock_irq(&cm.lock);
29125 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29126 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29127 counter[CM_SIDR_REQ_COUNTER]);
29128 goto out; /* Duplicate message. */
29129 }
29130 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29131 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29132 msg->retries = 1;
29133
29134 - atomic_long_add(1 + msg->retries,
29135 + atomic_long_add_unchecked(1 + msg->retries,
29136 &port->counter_group[CM_XMIT].counter[attr_index]);
29137 if (msg->retries)
29138 - atomic_long_add(msg->retries,
29139 + atomic_long_add_unchecked(msg->retries,
29140 &port->counter_group[CM_XMIT_RETRIES].
29141 counter[attr_index]);
29142
29143 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29144 }
29145
29146 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29147 - atomic_long_inc(&port->counter_group[CM_RECV].
29148 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29149 counter[attr_id - CM_ATTR_ID_OFFSET]);
29150
29151 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29152 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29153 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29154
29155 return sprintf(buf, "%ld\n",
29156 - atomic_long_read(&group->counter[cm_attr->index]));
29157 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29158 }
29159
29160 -static struct sysfs_ops cm_counter_ops = {
29161 +static const struct sysfs_ops cm_counter_ops = {
29162 .show = cm_show_counter
29163 };
29164
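
The cm.c hunks above convert the per-port MAD statistics from atomic_long_t to atomic_long_unchecked_t. Under PAX_REFCOUNT the ordinary atomic operations are instrumented to detect overflow (to catch reference-count overflow exploits); counters that are purely statistical and may legitimately wrap are therefore switched to the _unchecked variants, which opt out of that check. A minimal usage sketch with a hypothetical counter name; the calls mirror those used throughout this patch, and without PAX_REFCOUNT they are expected to behave like the plain atomic_long_* operations:

#include <asm/atomic.h>

/* Hypothetical statistics counter: wrapping is harmless, so it is exempted
 * from PAX_REFCOUNT overflow detection via the _unchecked operations. */
static atomic_long_unchecked_t rx_duplicates;	/* static storage: starts at 0 */

static void note_duplicate(void)
{
	atomic_long_inc_unchecked(&rx_duplicates);
}

static long duplicates_seen(void)
{
	return atomic_long_read_unchecked(&rx_duplicates);
}
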
29165 diff -urNp linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c
29166 --- linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29167 +++ linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29168 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29169
29170 struct task_struct *thread;
29171
29172 - atomic_t req_ser;
29173 - atomic_t flush_ser;
29174 + atomic_unchecked_t req_ser;
29175 + atomic_unchecked_t flush_ser;
29176
29177 wait_queue_head_t force_wait;
29178 };
29179 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29180 struct ib_fmr_pool *pool = pool_ptr;
29181
29182 do {
29183 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29184 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29185 ib_fmr_batch_release(pool);
29186
29187 - atomic_inc(&pool->flush_ser);
29188 + atomic_inc_unchecked(&pool->flush_ser);
29189 wake_up_interruptible(&pool->force_wait);
29190
29191 if (pool->flush_function)
29192 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29193 }
29194
29195 set_current_state(TASK_INTERRUPTIBLE);
29196 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29197 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29198 !kthread_should_stop())
29199 schedule();
29200 __set_current_state(TASK_RUNNING);
29201 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29202 pool->dirty_watermark = params->dirty_watermark;
29203 pool->dirty_len = 0;
29204 spin_lock_init(&pool->pool_lock);
29205 - atomic_set(&pool->req_ser, 0);
29206 - atomic_set(&pool->flush_ser, 0);
29207 + atomic_set_unchecked(&pool->req_ser, 0);
29208 + atomic_set_unchecked(&pool->flush_ser, 0);
29209 init_waitqueue_head(&pool->force_wait);
29210
29211 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29212 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29213 }
29214 spin_unlock_irq(&pool->pool_lock);
29215
29216 - serial = atomic_inc_return(&pool->req_ser);
29217 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29218 wake_up_process(pool->thread);
29219
29220 if (wait_event_interruptible(pool->force_wait,
29221 - atomic_read(&pool->flush_ser) - serial >= 0))
29222 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29223 return -EINTR;
29224
29225 return 0;
29226 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29227 } else {
29228 list_add_tail(&fmr->list, &pool->dirty_list);
29229 if (++pool->dirty_len >= pool->dirty_watermark) {
29230 - atomic_inc(&pool->req_ser);
29231 + atomic_inc_unchecked(&pool->req_ser);
29232 wake_up_process(pool->thread);
29233 }
29234 }
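
The fmr_pool serials req_ser and flush_ser are another case of counters that are meant to wrap: correctness relies only on the signed difference between the two, compared against zero, which stays valid across wraparound. That is also why they are safe to exempt from PAX_REFCOUNT checking. A small sketch of the comparison idiom, assuming 32-bit counters and a hypothetical helper name:

/* "a happened before b" for wrapping sequence numbers: do the subtraction
 * in unsigned arithmetic, then interpret the result as signed. */
static inline int serial_before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

For example, with a == 0xfffffffe (just before the wrap) and b == 0x00000001 (just after it), a - b is 0xfffffffd, i.e. -3 as a signed int, so a is correctly reported as earlier even though it is numerically larger.
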
29235 diff -urNp linux-2.6.32.41/drivers/infiniband/core/sysfs.c linux-2.6.32.41/drivers/infiniband/core/sysfs.c
29236 --- linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29237 +++ linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29238 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29239 return port_attr->show(p, port_attr, buf);
29240 }
29241
29242 -static struct sysfs_ops port_sysfs_ops = {
29243 +static const struct sysfs_ops port_sysfs_ops = {
29244 .show = port_attr_show
29245 };
29246
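
This sysfs_ops change is one instance of a pattern repeated throughout the patch (see also backlight_ops, platform_suspend_ops and dm_sysfs_ops further down): operation tables that are never modified at run time are declared const so they can be placed in read-only data, where a kernel write primitive cannot redirect their function pointers. A minimal sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t foo_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	return sprintf(buf, "example\n");
}

/* const => the function-pointer table lands in read-only data */
static const struct sysfs_ops foo_sysfs_ops = {
	.show	= foo_attr_show,
};
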
29247 diff -urNp linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c
29248 --- linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29249 +++ linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29250 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29251 dst->grh.sgid_index = src->grh.sgid_index;
29252 dst->grh.hop_limit = src->grh.hop_limit;
29253 dst->grh.traffic_class = src->grh.traffic_class;
29254 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29255 dst->dlid = src->dlid;
29256 dst->sl = src->sl;
29257 dst->src_path_bits = src->src_path_bits;
29258 dst->static_rate = src->static_rate;
29259 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29260 dst->port_num = src->port_num;
29261 + dst->reserved = 0;
29262 }
29263 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29264
29265 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29266 struct ib_qp_attr *src)
29267 {
29268 + dst->qp_state = src->qp_state;
29269 dst->cur_qp_state = src->cur_qp_state;
29270 dst->path_mtu = src->path_mtu;
29271 dst->path_mig_state = src->path_mig_state;
29272 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29273 dst->rnr_retry = src->rnr_retry;
29274 dst->alt_port_num = src->alt_port_num;
29275 dst->alt_timeout = src->alt_timeout;
29276 + memset(dst->reserved, 0, sizeof(dst->reserved));
29277 }
29278 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29279
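
The uverbs_marshall change clears the reserved fields (and copies qp_state, which was previously skipped) before the structures are handed back to user space, so uninitialized kernel memory in padding or reserved members cannot leak. The same idea appears in the mISDN socket ioctls below, where the whole struct mISDN_devinfo is memset first. A sketch of the pattern with a hypothetical structure and helper:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_info {		/* hypothetical ABI struct with a reserved field */
	__u32 id;
	__u16 flags;
	__u16 reserved;
};

static int foo_fill_info(void __user *ubuf, u32 id, u16 flags)
{
	struct foo_info out;

	memset(&out, 0, sizeof(out));	/* no stale stack bytes reach user space */
	out.id = id;
	out.flags = flags;
	return copy_to_user(ubuf, &out, sizeof(out)) ? -EFAULT : 0;
}
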
29280 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c
29281 --- linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29282 +++ linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29283 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29284 struct infinipath_counters counters;
29285 struct ipath_devdata *dd;
29286
29287 + pax_track_stack();
29288 +
29289 dd = file->f_path.dentry->d_inode->i_private;
29290 dd->ipath_f_read_counters(dd, &counters);
29291
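
pax_track_stack() calls like the one above are inserted throughout the patch at the top of functions with unusually large stack frames (here a struct infinipath_counters lives on the stack). Under PaX's stack sanitization this lets the kernel track how deeply the stack was actually used so the used portion can be cleared before returning to user space; with the feature disabled the call is presumably compiled away. The sidewinder.c hunk below adds #include <linux/sched.h> alongside the same call, which suggests that is where the helper is declared. A hypothetical illustration of the pattern:

#include <linux/kernel.h>
#include <linux/sched.h>

static ssize_t foo_read_counters(char *out, size_t len)
{
	char scratch[1024];		/* deliberately large stack frame */

	pax_track_stack();

	scnprintf(scratch, sizeof(scratch), "counters: %d %d", 1, 2);
	return scnprintf(out, len, "%s\n", scratch);
}
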
29292 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c
29293 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29294 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29295 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29296 LIST_HEAD(nes_adapter_list);
29297 static LIST_HEAD(nes_dev_list);
29298
29299 -atomic_t qps_destroyed;
29300 +atomic_unchecked_t qps_destroyed;
29301
29302 static unsigned int ee_flsh_adapter;
29303 static unsigned int sysfs_nonidx_addr;
29304 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29305 struct nes_adapter *nesadapter = nesdev->nesadapter;
29306 u32 qp_id;
29307
29308 - atomic_inc(&qps_destroyed);
29309 + atomic_inc_unchecked(&qps_destroyed);
29310
29311 /* Free the control structures */
29312
29313 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c
29314 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29315 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29316 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29317 u32 cm_listens_created;
29318 u32 cm_listens_destroyed;
29319 u32 cm_backlog_drops;
29320 -atomic_t cm_loopbacks;
29321 -atomic_t cm_nodes_created;
29322 -atomic_t cm_nodes_destroyed;
29323 -atomic_t cm_accel_dropped_pkts;
29324 -atomic_t cm_resets_recvd;
29325 +atomic_unchecked_t cm_loopbacks;
29326 +atomic_unchecked_t cm_nodes_created;
29327 +atomic_unchecked_t cm_nodes_destroyed;
29328 +atomic_unchecked_t cm_accel_dropped_pkts;
29329 +atomic_unchecked_t cm_resets_recvd;
29330
29331 static inline int mini_cm_accelerated(struct nes_cm_core *,
29332 struct nes_cm_node *);
29333 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29334
29335 static struct nes_cm_core *g_cm_core;
29336
29337 -atomic_t cm_connects;
29338 -atomic_t cm_accepts;
29339 -atomic_t cm_disconnects;
29340 -atomic_t cm_closes;
29341 -atomic_t cm_connecteds;
29342 -atomic_t cm_connect_reqs;
29343 -atomic_t cm_rejects;
29344 +atomic_unchecked_t cm_connects;
29345 +atomic_unchecked_t cm_accepts;
29346 +atomic_unchecked_t cm_disconnects;
29347 +atomic_unchecked_t cm_closes;
29348 +atomic_unchecked_t cm_connecteds;
29349 +atomic_unchecked_t cm_connect_reqs;
29350 +atomic_unchecked_t cm_rejects;
29351
29352
29353 /**
29354 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29355 cm_node->rem_mac);
29356
29357 add_hte_node(cm_core, cm_node);
29358 - atomic_inc(&cm_nodes_created);
29359 + atomic_inc_unchecked(&cm_nodes_created);
29360
29361 return cm_node;
29362 }
29363 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29364 }
29365
29366 atomic_dec(&cm_core->node_cnt);
29367 - atomic_inc(&cm_nodes_destroyed);
29368 + atomic_inc_unchecked(&cm_nodes_destroyed);
29369 nesqp = cm_node->nesqp;
29370 if (nesqp) {
29371 nesqp->cm_node = NULL;
29372 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29373
29374 static void drop_packet(struct sk_buff *skb)
29375 {
29376 - atomic_inc(&cm_accel_dropped_pkts);
29377 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29378 dev_kfree_skb_any(skb);
29379 }
29380
29381 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29382
29383 int reset = 0; /* whether to send reset in case of err.. */
29384 int passive_state;
29385 - atomic_inc(&cm_resets_recvd);
29386 + atomic_inc_unchecked(&cm_resets_recvd);
29387 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29388 " refcnt=%d\n", cm_node, cm_node->state,
29389 atomic_read(&cm_node->ref_count));
29390 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29391 rem_ref_cm_node(cm_node->cm_core, cm_node);
29392 return NULL;
29393 }
29394 - atomic_inc(&cm_loopbacks);
29395 + atomic_inc_unchecked(&cm_loopbacks);
29396 loopbackremotenode->loopbackpartner = cm_node;
29397 loopbackremotenode->tcp_cntxt.rcv_wscale =
29398 NES_CM_DEFAULT_RCV_WND_SCALE;
29399 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29400 add_ref_cm_node(cm_node);
29401 } else if (cm_node->state == NES_CM_STATE_TSA) {
29402 rem_ref_cm_node(cm_core, cm_node);
29403 - atomic_inc(&cm_accel_dropped_pkts);
29404 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29405 dev_kfree_skb_any(skb);
29406 break;
29407 }
29408 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29409
29410 if ((cm_id) && (cm_id->event_handler)) {
29411 if (issue_disconn) {
29412 - atomic_inc(&cm_disconnects);
29413 + atomic_inc_unchecked(&cm_disconnects);
29414 cm_event.event = IW_CM_EVENT_DISCONNECT;
29415 cm_event.status = disconn_status;
29416 cm_event.local_addr = cm_id->local_addr;
29417 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29418 }
29419
29420 if (issue_close) {
29421 - atomic_inc(&cm_closes);
29422 + atomic_inc_unchecked(&cm_closes);
29423 nes_disconnect(nesqp, 1);
29424
29425 cm_id->provider_data = nesqp;
29426 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29427
29428 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29429 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29430 - atomic_inc(&cm_accepts);
29431 + atomic_inc_unchecked(&cm_accepts);
29432
29433 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29434 atomic_read(&nesvnic->netdev->refcnt));
29435 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29436
29437 struct nes_cm_core *cm_core;
29438
29439 - atomic_inc(&cm_rejects);
29440 + atomic_inc_unchecked(&cm_rejects);
29441 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29442 loopback = cm_node->loopbackpartner;
29443 cm_core = cm_node->cm_core;
29444 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29445 ntohl(cm_id->local_addr.sin_addr.s_addr),
29446 ntohs(cm_id->local_addr.sin_port));
29447
29448 - atomic_inc(&cm_connects);
29449 + atomic_inc_unchecked(&cm_connects);
29450 nesqp->active_conn = 1;
29451
29452 /* cache the cm_id in the qp */
29453 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29454 if (nesqp->destroyed) {
29455 return;
29456 }
29457 - atomic_inc(&cm_connecteds);
29458 + atomic_inc_unchecked(&cm_connecteds);
29459 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29460 " local port 0x%04X. jiffies = %lu.\n",
29461 nesqp->hwqp.qp_id,
29462 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29463
29464 ret = cm_id->event_handler(cm_id, &cm_event);
29465 cm_id->add_ref(cm_id);
29466 - atomic_inc(&cm_closes);
29467 + atomic_inc_unchecked(&cm_closes);
29468 cm_event.event = IW_CM_EVENT_CLOSE;
29469 cm_event.status = IW_CM_EVENT_STATUS_OK;
29470 cm_event.provider_data = cm_id->provider_data;
29471 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29472 return;
29473 cm_id = cm_node->cm_id;
29474
29475 - atomic_inc(&cm_connect_reqs);
29476 + atomic_inc_unchecked(&cm_connect_reqs);
29477 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29478 cm_node, cm_id, jiffies);
29479
29480 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29481 return;
29482 cm_id = cm_node->cm_id;
29483
29484 - atomic_inc(&cm_connect_reqs);
29485 + atomic_inc_unchecked(&cm_connect_reqs);
29486 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29487 cm_node, cm_id, jiffies);
29488
29489 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h
29490 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29491 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29492 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29493 extern unsigned int wqm_quanta;
29494 extern struct list_head nes_adapter_list;
29495
29496 -extern atomic_t cm_connects;
29497 -extern atomic_t cm_accepts;
29498 -extern atomic_t cm_disconnects;
29499 -extern atomic_t cm_closes;
29500 -extern atomic_t cm_connecteds;
29501 -extern atomic_t cm_connect_reqs;
29502 -extern atomic_t cm_rejects;
29503 -extern atomic_t mod_qp_timouts;
29504 -extern atomic_t qps_created;
29505 -extern atomic_t qps_destroyed;
29506 -extern atomic_t sw_qps_destroyed;
29507 +extern atomic_unchecked_t cm_connects;
29508 +extern atomic_unchecked_t cm_accepts;
29509 +extern atomic_unchecked_t cm_disconnects;
29510 +extern atomic_unchecked_t cm_closes;
29511 +extern atomic_unchecked_t cm_connecteds;
29512 +extern atomic_unchecked_t cm_connect_reqs;
29513 +extern atomic_unchecked_t cm_rejects;
29514 +extern atomic_unchecked_t mod_qp_timouts;
29515 +extern atomic_unchecked_t qps_created;
29516 +extern atomic_unchecked_t qps_destroyed;
29517 +extern atomic_unchecked_t sw_qps_destroyed;
29518 extern u32 mh_detected;
29519 extern u32 mh_pauses_sent;
29520 extern u32 cm_packets_sent;
29521 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29522 extern u32 cm_listens_created;
29523 extern u32 cm_listens_destroyed;
29524 extern u32 cm_backlog_drops;
29525 -extern atomic_t cm_loopbacks;
29526 -extern atomic_t cm_nodes_created;
29527 -extern atomic_t cm_nodes_destroyed;
29528 -extern atomic_t cm_accel_dropped_pkts;
29529 -extern atomic_t cm_resets_recvd;
29530 +extern atomic_unchecked_t cm_loopbacks;
29531 +extern atomic_unchecked_t cm_nodes_created;
29532 +extern atomic_unchecked_t cm_nodes_destroyed;
29533 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29534 +extern atomic_unchecked_t cm_resets_recvd;
29535
29536 extern u32 int_mod_timer_init;
29537 extern u32 int_mod_cq_depth_256;
29538 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c
29539 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29540 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29541 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29542 target_stat_values[++index] = mh_detected;
29543 target_stat_values[++index] = mh_pauses_sent;
29544 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29545 - target_stat_values[++index] = atomic_read(&cm_connects);
29546 - target_stat_values[++index] = atomic_read(&cm_accepts);
29547 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29548 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29549 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29550 - target_stat_values[++index] = atomic_read(&cm_rejects);
29551 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29552 - target_stat_values[++index] = atomic_read(&qps_created);
29553 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29554 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29555 - target_stat_values[++index] = atomic_read(&cm_closes);
29556 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29557 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29558 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29559 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29560 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29561 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29562 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29563 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29564 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29565 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29566 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29567 target_stat_values[++index] = cm_packets_sent;
29568 target_stat_values[++index] = cm_packets_bounced;
29569 target_stat_values[++index] = cm_packets_created;
29570 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29571 target_stat_values[++index] = cm_listens_created;
29572 target_stat_values[++index] = cm_listens_destroyed;
29573 target_stat_values[++index] = cm_backlog_drops;
29574 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29575 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29576 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29577 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29578 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29579 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29580 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29581 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29582 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29583 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29584 target_stat_values[++index] = int_mod_timer_init;
29585 target_stat_values[++index] = int_mod_cq_depth_1;
29586 target_stat_values[++index] = int_mod_cq_depth_4;
29587 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c
29588 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29589 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29590 @@ -45,9 +45,9 @@
29591
29592 #include <rdma/ib_umem.h>
29593
29594 -atomic_t mod_qp_timouts;
29595 -atomic_t qps_created;
29596 -atomic_t sw_qps_destroyed;
29597 +atomic_unchecked_t mod_qp_timouts;
29598 +atomic_unchecked_t qps_created;
29599 +atomic_unchecked_t sw_qps_destroyed;
29600
29601 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29602
29603 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29604 if (init_attr->create_flags)
29605 return ERR_PTR(-EINVAL);
29606
29607 - atomic_inc(&qps_created);
29608 + atomic_inc_unchecked(&qps_created);
29609 switch (init_attr->qp_type) {
29610 case IB_QPT_RC:
29611 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29612 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29613 struct iw_cm_event cm_event;
29614 int ret;
29615
29616 - atomic_inc(&sw_qps_destroyed);
29617 + atomic_inc_unchecked(&sw_qps_destroyed);
29618 nesqp->destroyed = 1;
29619
29620 /* Blow away the connection if it exists. */
29621 diff -urNp linux-2.6.32.41/drivers/input/gameport/gameport.c linux-2.6.32.41/drivers/input/gameport/gameport.c
29622 --- linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29623 +++ linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29624 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29625 */
29626 static void gameport_init_port(struct gameport *gameport)
29627 {
29628 - static atomic_t gameport_no = ATOMIC_INIT(0);
29629 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29630
29631 __module_get(THIS_MODULE);
29632
29633 mutex_init(&gameport->drv_mutex);
29634 device_initialize(&gameport->dev);
29635 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29636 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29637 gameport->dev.bus = &gameport_bus;
29638 gameport->dev.release = gameport_release_port;
29639 if (gameport->parent)
29640 diff -urNp linux-2.6.32.41/drivers/input/input.c linux-2.6.32.41/drivers/input/input.c
29641 --- linux-2.6.32.41/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29642 +++ linux-2.6.32.41/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29643 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29644 */
29645 int input_register_device(struct input_dev *dev)
29646 {
29647 - static atomic_t input_no = ATOMIC_INIT(0);
29648 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29649 struct input_handler *handler;
29650 const char *path;
29651 int error;
29652 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29653 dev->setkeycode = input_default_setkeycode;
29654
29655 dev_set_name(&dev->dev, "input%ld",
29656 - (unsigned long) atomic_inc_return(&input_no) - 1);
29657 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29658
29659 error = device_add(&dev->dev);
29660 if (error)
29661 diff -urNp linux-2.6.32.41/drivers/input/joystick/sidewinder.c linux-2.6.32.41/drivers/input/joystick/sidewinder.c
29662 --- linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29663 +++ linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29664 @@ -30,6 +30,7 @@
29665 #include <linux/kernel.h>
29666 #include <linux/module.h>
29667 #include <linux/slab.h>
29668 +#include <linux/sched.h>
29669 #include <linux/init.h>
29670 #include <linux/input.h>
29671 #include <linux/gameport.h>
29672 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29673 unsigned char buf[SW_LENGTH];
29674 int i;
29675
29676 + pax_track_stack();
29677 +
29678 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29679
29680 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29681 diff -urNp linux-2.6.32.41/drivers/input/joystick/xpad.c linux-2.6.32.41/drivers/input/joystick/xpad.c
29682 --- linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29683 +++ linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29684 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29685
29686 static int xpad_led_probe(struct usb_xpad *xpad)
29687 {
29688 - static atomic_t led_seq = ATOMIC_INIT(0);
29689 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29690 long led_no;
29691 struct xpad_led *led;
29692 struct led_classdev *led_cdev;
29693 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29694 if (!led)
29695 return -ENOMEM;
29696
29697 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29698 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29699
29700 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29701 led->xpad = xpad;
29702 diff -urNp linux-2.6.32.41/drivers/input/serio/serio.c linux-2.6.32.41/drivers/input/serio/serio.c
29703 --- linux-2.6.32.41/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29704 +++ linux-2.6.32.41/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29705 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
29706 */
29707 static void serio_init_port(struct serio *serio)
29708 {
29709 - static atomic_t serio_no = ATOMIC_INIT(0);
29710 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29711
29712 __module_get(THIS_MODULE);
29713
29714 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29715 mutex_init(&serio->drv_mutex);
29716 device_initialize(&serio->dev);
29717 dev_set_name(&serio->dev, "serio%ld",
29718 - (long)atomic_inc_return(&serio_no) - 1);
29719 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
29720 serio->dev.bus = &serio_bus;
29721 serio->dev.release = serio_release_port;
29722 if (serio->parent) {
29723 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/common.c linux-2.6.32.41/drivers/isdn/gigaset/common.c
29724 --- linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29725 +++ linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29726 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29727 cs->commands_pending = 0;
29728 cs->cur_at_seq = 0;
29729 cs->gotfwver = -1;
29730 - cs->open_count = 0;
29731 + local_set(&cs->open_count, 0);
29732 cs->dev = NULL;
29733 cs->tty = NULL;
29734 cs->tty_dev = NULL;
29735 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h
29736 --- linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29737 +++ linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29738 @@ -34,6 +34,7 @@
29739 #include <linux/tty_driver.h>
29740 #include <linux/list.h>
29741 #include <asm/atomic.h>
29742 +#include <asm/local.h>
29743
29744 #define GIG_VERSION {0,5,0,0}
29745 #define GIG_COMPAT {0,4,0,0}
29746 @@ -446,7 +447,7 @@ struct cardstate {
29747 spinlock_t cmdlock;
29748 unsigned curlen, cmdbytes;
29749
29750 - unsigned open_count;
29751 + local_t open_count;
29752 struct tty_struct *tty;
29753 struct tasklet_struct if_wake_tasklet;
29754 unsigned control_state;
29755 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/interface.c linux-2.6.32.41/drivers/isdn/gigaset/interface.c
29756 --- linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29757 +++ linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29758 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29759 return -ERESTARTSYS; // FIXME -EINTR?
29760 tty->driver_data = cs;
29761
29762 - ++cs->open_count;
29763 -
29764 - if (cs->open_count == 1) {
29765 + if (local_inc_return(&cs->open_count) == 1) {
29766 spin_lock_irqsave(&cs->lock, flags);
29767 cs->tty = tty;
29768 spin_unlock_irqrestore(&cs->lock, flags);
29769 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29770
29771 if (!cs->connected)
29772 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29773 - else if (!cs->open_count)
29774 + else if (!local_read(&cs->open_count))
29775 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29776 else {
29777 - if (!--cs->open_count) {
29778 + if (!local_dec_return(&cs->open_count)) {
29779 spin_lock_irqsave(&cs->lock, flags);
29780 cs->tty = NULL;
29781 spin_unlock_irqrestore(&cs->lock, flags);
29782 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29783 if (!cs->connected) {
29784 gig_dbg(DEBUG_IF, "not connected");
29785 retval = -ENODEV;
29786 - } else if (!cs->open_count)
29787 + } else if (!local_read(&cs->open_count))
29788 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29789 else {
29790 retval = 0;
29791 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29792 if (!cs->connected) {
29793 gig_dbg(DEBUG_IF, "not connected");
29794 retval = -ENODEV;
29795 - } else if (!cs->open_count)
29796 + } else if (!local_read(&cs->open_count))
29797 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29798 else if (cs->mstate != MS_LOCKED) {
29799 dev_warn(cs->dev, "can't write to unlocked device\n");
29800 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29801 if (!cs->connected) {
29802 gig_dbg(DEBUG_IF, "not connected");
29803 retval = -ENODEV;
29804 - } else if (!cs->open_count)
29805 + } else if (!local_read(&cs->open_count))
29806 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29807 else if (cs->mstate != MS_LOCKED) {
29808 dev_warn(cs->dev, "can't write to unlocked device\n");
29809 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29810
29811 if (!cs->connected)
29812 gig_dbg(DEBUG_IF, "not connected");
29813 - else if (!cs->open_count)
29814 + else if (!local_read(&cs->open_count))
29815 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29816 else if (cs->mstate != MS_LOCKED)
29817 dev_warn(cs->dev, "can't write to unlocked device\n");
29818 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29819
29820 if (!cs->connected)
29821 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29822 - else if (!cs->open_count)
29823 + else if (!local_read(&cs->open_count))
29824 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29825 else {
29826 //FIXME
29827 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29828
29829 if (!cs->connected)
29830 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29831 - else if (!cs->open_count)
29832 + else if (!local_read(&cs->open_count))
29833 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29834 else {
29835 //FIXME
29836 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
29837 goto out;
29838 }
29839
29840 - if (!cs->open_count) {
29841 + if (!local_read(&cs->open_count)) {
29842 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29843 goto out;
29844 }
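
The gigaset changes replace the plain `unsigned open_count` with a local_t, routing every update through local_inc_return()/local_dec_return() and every test through local_read(), so the open/close accounting becomes a proper read-modify-write primitive instead of a bare ++/-- on a shared field. A sketch of the resulting open/close shape, using a hypothetical device:

#include <asm/local.h>

static local_t open_count;	/* hypothetical per-device open counter */

static int foo_open(void)
{
	if (local_inc_return(&open_count) == 1) {
		/* first opener: bring the device up */
	}
	return 0;
}

static void foo_release(void)
{
	if (!local_read(&open_count))
		return;		/* never opened: nothing to do */

	if (!local_dec_return(&open_count)) {
		/* last opener gone: shut the device down */
	}
}
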
29845 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c
29846 --- linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
29847 +++ linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
29848 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
29849 }
29850 if (left) {
29851 if (t4file->user) {
29852 - if (copy_from_user(buf, dp, left))
29853 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29854 return -EFAULT;
29855 } else {
29856 memcpy(buf, dp, left);
29857 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
29858 }
29859 if (left) {
29860 if (config->user) {
29861 - if (copy_from_user(buf, dp, left))
29862 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29863 return -EFAULT;
29864 } else {
29865 memcpy(buf, dp, left);
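
Both b1.c hunks guard a copy_from_user() into a fixed-size on-stack buffer with an explicit length check, so a user-supplied `left` larger than the buffer yields -EFAULT instead of a kernel stack overflow; icn_writecmd() below gets the same treatment. A sketch of the pattern with a hypothetical helper:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int foo_load_chunk(const char __user *src, size_t left)
{
	unsigned char buf[256];

	/* reject user-controlled lengths that do not fit the buffer */
	if (left > sizeof(buf) || copy_from_user(buf, src, left))
		return -EFAULT;

	/* ... hand buf/left on to the hardware ... */
	return 0;
}
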
29866 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c
29867 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
29868 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
29869 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29870 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29871 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29872
29873 + pax_track_stack();
29874
29875 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29876 {
29877 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c
29878 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
29879 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
29880 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29881 IDI_SYNC_REQ req;
29882 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29883
29884 + pax_track_stack();
29885 +
29886 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29887
29888 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29889 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c
29890 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
29891 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
29892 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29893 IDI_SYNC_REQ req;
29894 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29895
29896 + pax_track_stack();
29897 +
29898 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29899
29900 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29901 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c
29902 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
29903 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
29904 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
29905 IDI_SYNC_REQ req;
29906 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29907
29908 + pax_track_stack();
29909 +
29910 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29911
29912 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29913 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c
29914 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
29915 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
29916 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29917 IDI_SYNC_REQ req;
29918 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29919
29920 + pax_track_stack();
29921 +
29922 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29923
29924 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29925 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c
29926 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
29927 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
29928 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
29929 dword d;
29930 word w;
29931
29932 + pax_track_stack();
29933 +
29934 a = plci->adapter;
29935 Id = ((word)plci->Id<<8)|a->Id;
29936 PUT_WORD(&SS_Ind[4],0x0000);
29937 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
29938 word j, n, w;
29939 dword d;
29940
29941 + pax_track_stack();
29942 +
29943
29944 for(i=0;i<8;i++) bp_parms[i].length = 0;
29945 for(i=0;i<2;i++) global_config[i].length = 0;
29946 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
29947 const byte llc3[] = {4,3,2,2,6,6,0};
29948 const byte header[] = {0,2,3,3,0,0,0};
29949
29950 + pax_track_stack();
29951 +
29952 for(i=0;i<8;i++) bp_parms[i].length = 0;
29953 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29954 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29955 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
29956 word appl_number_group_type[MAX_APPL];
29957 PLCI *auxplci;
29958
29959 + pax_track_stack();
29960 +
29961 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29962
29963 if(!a->group_optimization_enabled)
29964 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c
29965 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
29966 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
29967 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29968 IDI_SYNC_REQ req;
29969 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29970
29971 + pax_track_stack();
29972 +
29973 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29974
29975 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29976 diff -urNp linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c
29977 --- linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
29978 +++ linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
29979 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
29980 } iocpar;
29981 void __user *argp = (void __user *)arg;
29982
29983 + pax_track_stack();
29984 +
29985 #define name iocpar.name
29986 #define bname iocpar.bname
29987 #define iocts iocpar.iocts
29988 diff -urNp linux-2.6.32.41/drivers/isdn/icn/icn.c linux-2.6.32.41/drivers/isdn/icn/icn.c
29989 --- linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
29990 +++ linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
29991 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
29992 if (count > len)
29993 count = len;
29994 if (user) {
29995 - if (copy_from_user(msg, buf, count))
29996 + if (count > sizeof msg || copy_from_user(msg, buf, count))
29997 return -EFAULT;
29998 } else
29999 memcpy(msg, buf, count);
30000 diff -urNp linux-2.6.32.41/drivers/isdn/mISDN/socket.c linux-2.6.32.41/drivers/isdn/mISDN/socket.c
30001 --- linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30002 +++ linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30003 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30004 if (dev) {
30005 struct mISDN_devinfo di;
30006
30007 + memset(&di, 0, sizeof(di));
30008 di.id = dev->id;
30009 di.Dprotocols = dev->Dprotocols;
30010 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30011 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30012 if (dev) {
30013 struct mISDN_devinfo di;
30014
30015 + memset(&di, 0, sizeof(di));
30016 di.id = dev->id;
30017 di.Dprotocols = dev->Dprotocols;
30018 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30019 diff -urNp linux-2.6.32.41/drivers/isdn/sc/interrupt.c linux-2.6.32.41/drivers/isdn/sc/interrupt.c
30020 --- linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30021 +++ linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30022 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30023 }
30024 else if(callid>=0x0000 && callid<=0x7FFF)
30025 {
30026 + int len;
30027 +
30028 pr_debug("%s: Got Incoming Call\n",
30029 sc_adapter[card]->devicename);
30030 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30031 - strcpy(setup.eazmsn,
30032 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30033 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30034 + sizeof(setup.phone));
30035 + if (len >= sizeof(setup.phone))
30036 + continue;
30037 + len = strlcpy(setup.eazmsn,
30038 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30039 + sizeof(setup.eazmsn));
30040 + if (len >= sizeof(setup.eazmsn))
30041 + continue;
30042 setup.si1 = 7;
30043 setup.si2 = 0;
30044 setup.plan = 0;
30045 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30046 * Handle a GetMyNumber Rsp
30047 */
30048 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30049 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30050 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30051 + rcvmsg.msg_data.byte_array,
30052 + sizeof(rcvmsg.msg_data.byte_array));
30053 continue;
30054 }
30055
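
The sc interrupt handler previously strcpy()ed strings received from the card into fixed-size fields; the patch switches to strlcpy() and uses its return value (the length of the source) to detect truncation, skipping the message when the data does not fit. A sketch of the idiom with hypothetical buffer names:

#include <linux/errno.h>
#include <linux/string.h>

static int copy_phone_number(char *dst, size_t dst_len, const char *src)
{
	size_t n = strlcpy(dst, src, dst_len);

	/* strlcpy returns strlen(src); >= dst_len means the copy was truncated */
	return n >= dst_len ? -EINVAL : 0;
}
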
30056 diff -urNp linux-2.6.32.41/drivers/lguest/core.c linux-2.6.32.41/drivers/lguest/core.c
30057 --- linux-2.6.32.41/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30058 +++ linux-2.6.32.41/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30059 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30060 * it's worked so far. The end address needs +1 because __get_vm_area
30061 * allocates an extra guard page, so we need space for that.
30062 */
30063 +
30064 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30065 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30066 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30067 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30068 +#else
30069 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30070 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30071 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30072 +#endif
30073 +
30074 if (!switcher_vma) {
30075 err = -ENOMEM;
30076 printk("lguest: could not map switcher pages high\n");
30077 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30078 * Now the Switcher is mapped at the right address, we can't fail!
30079 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30080 */
30081 - memcpy(switcher_vma->addr, start_switcher_text,
30082 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30083 end_switcher_text - start_switcher_text);
30084
30085 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30086 diff -urNp linux-2.6.32.41/drivers/lguest/x86/core.c linux-2.6.32.41/drivers/lguest/x86/core.c
30087 --- linux-2.6.32.41/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30088 +++ linux-2.6.32.41/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30089 @@ -59,7 +59,7 @@ static struct {
30090 /* Offset from where switcher.S was compiled to where we've copied it */
30091 static unsigned long switcher_offset(void)
30092 {
30093 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30094 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30095 }
30096
30097 /* This cpu's struct lguest_pages. */
30098 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30099 * These copies are pretty cheap, so we do them unconditionally: */
30100 /* Save the current Host top-level page directory.
30101 */
30102 +
30103 +#ifdef CONFIG_PAX_PER_CPU_PGD
30104 + pages->state.host_cr3 = read_cr3();
30105 +#else
30106 pages->state.host_cr3 = __pa(current->mm->pgd);
30107 +#endif
30108 +
30109 /*
30110 * Set up the Guest's page tables to see this CPU's pages (and no
30111 * other CPU's pages).
30112 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30113 * compiled-in switcher code and the high-mapped copy we just made.
30114 */
30115 for (i = 0; i < IDT_ENTRIES; i++)
30116 - default_idt_entries[i] += switcher_offset();
30117 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30118
30119 /*
30120 * Set up the Switcher's per-cpu areas.
30121 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30122 * it will be undisturbed when we switch. To change %cs and jump we
30123 * need this structure to feed to Intel's "lcall" instruction.
30124 */
30125 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30126 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30127 lguest_entry.segment = LGUEST_CS;
30128
30129 /*
30130 diff -urNp linux-2.6.32.41/drivers/lguest/x86/switcher_32.S linux-2.6.32.41/drivers/lguest/x86/switcher_32.S
30131 --- linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30132 +++ linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30133 @@ -87,6 +87,7 @@
30134 #include <asm/page.h>
30135 #include <asm/segment.h>
30136 #include <asm/lguest.h>
30137 +#include <asm/processor-flags.h>
30138
30139 // We mark the start of the code to copy
30140 // It's placed in .text tho it's never run here
30141 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30142 // Changes type when we load it: damn Intel!
30143 // For after we switch over our page tables
30144 // That entry will be read-only: we'd crash.
30145 +
30146 +#ifdef CONFIG_PAX_KERNEXEC
30147 + mov %cr0, %edx
30148 + xor $X86_CR0_WP, %edx
30149 + mov %edx, %cr0
30150 +#endif
30151 +
30152 movl $(GDT_ENTRY_TSS*8), %edx
30153 ltr %dx
30154
30155 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30156 // Let's clear it again for our return.
30157 // The GDT descriptor of the Host
30158 // Points to the table after two "size" bytes
30159 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30160 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30161 // Clear "used" from type field (byte 5, bit 2)
30162 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30163 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30164 +
30165 +#ifdef CONFIG_PAX_KERNEXEC
30166 + mov %cr0, %eax
30167 + xor $X86_CR0_WP, %eax
30168 + mov %eax, %cr0
30169 +#endif
30170
30171 // Once our page table's switched, the Guest is live!
30172 // The Host fades as we run this final step.
30173 @@ -295,13 +309,12 @@ deliver_to_host:
30174 // I consulted gcc, and it gave
30175 // These instructions, which I gladly credit:
30176 leal (%edx,%ebx,8), %eax
30177 - movzwl (%eax),%edx
30178 - movl 4(%eax), %eax
30179 - xorw %ax, %ax
30180 - orl %eax, %edx
30181 + movl 4(%eax), %edx
30182 + movw (%eax), %dx
30183 // Now the address of the handler's in %edx
30184 // We call it now: its "iret" drops us home.
30185 - jmp *%edx
30186 + ljmp $__KERNEL_CS, $1f
30187 +1: jmp *%edx
30188
30189 // Every interrupt can come to us here
30190 // But we must truly tell each apart.
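
Two things happen in the switcher under PAX_KERNEXEC: the write that clears the TSS busy bit in the GDT now targets read-only memory, so CR0.WP is toggled off around it and restored immediately afterwards, and the jump into the host interrupt handler goes through a far jump that reloads %cs first, since KERNEXEC on i386 appears to shift the kernel code segment (this is also what the ktla_ktva() conversions above account for). A rough C-level sketch of the CR0.WP dance only; it is illustrative, not the patch's code, and a real caller would also have to keep from being preempted or migrated between the two writes:

#include <asm/processor-flags.h>
#include <asm/system.h>		/* read_cr0()/write_cr0() on 2.6.32 x86 */

static void write_to_readonly_descriptor(void (*do_write)(void))
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* let supervisor writes hit RO pages */
	do_write();			/* e.g. clear the TSS busy bit */
	write_cr0(cr0);			/* restore write protection */
}
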
30191 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c
30192 --- linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30193 +++ linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30194 @@ -15,7 +15,7 @@
30195
30196 #define MAX_PMU_LEVEL 0xFF
30197
30198 -static struct backlight_ops pmu_backlight_data;
30199 +static const struct backlight_ops pmu_backlight_data;
30200 static DEFINE_SPINLOCK(pmu_backlight_lock);
30201 static int sleeping, uses_pmu_bl;
30202 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30203 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30204 return bd->props.brightness;
30205 }
30206
30207 -static struct backlight_ops pmu_backlight_data = {
30208 +static const struct backlight_ops pmu_backlight_data = {
30209 .get_brightness = pmu_backlight_get_brightness,
30210 .update_status = pmu_backlight_update_status,
30211
30212 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu.c linux-2.6.32.41/drivers/macintosh/via-pmu.c
30213 --- linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30214 +++ linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30215 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30216 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30217 }
30218
30219 -static struct platform_suspend_ops pmu_pm_ops = {
30220 +static const struct platform_suspend_ops pmu_pm_ops = {
30221 .enter = powerbook_sleep,
30222 .valid = pmu_sleep_valid,
30223 };
30224 diff -urNp linux-2.6.32.41/drivers/md/dm.c linux-2.6.32.41/drivers/md/dm.c
30225 --- linux-2.6.32.41/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30226 +++ linux-2.6.32.41/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30227 @@ -163,9 +163,9 @@ struct mapped_device {
30228 /*
30229 * Event handling.
30230 */
30231 - atomic_t event_nr;
30232 + atomic_unchecked_t event_nr;
30233 wait_queue_head_t eventq;
30234 - atomic_t uevent_seq;
30235 + atomic_unchecked_t uevent_seq;
30236 struct list_head uevent_list;
30237 spinlock_t uevent_lock; /* Protect access to uevent_list */
30238
30239 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30240 rwlock_init(&md->map_lock);
30241 atomic_set(&md->holders, 1);
30242 atomic_set(&md->open_count, 0);
30243 - atomic_set(&md->event_nr, 0);
30244 - atomic_set(&md->uevent_seq, 0);
30245 + atomic_set_unchecked(&md->event_nr, 0);
30246 + atomic_set_unchecked(&md->uevent_seq, 0);
30247 INIT_LIST_HEAD(&md->uevent_list);
30248 spin_lock_init(&md->uevent_lock);
30249
30250 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30251
30252 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30253
30254 - atomic_inc(&md->event_nr);
30255 + atomic_inc_unchecked(&md->event_nr);
30256 wake_up(&md->eventq);
30257 }
30258
30259 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30260
30261 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30262 {
30263 - return atomic_add_return(1, &md->uevent_seq);
30264 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30265 }
30266
30267 uint32_t dm_get_event_nr(struct mapped_device *md)
30268 {
30269 - return atomic_read(&md->event_nr);
30270 + return atomic_read_unchecked(&md->event_nr);
30271 }
30272
30273 int dm_wait_event(struct mapped_device *md, int event_nr)
30274 {
30275 return wait_event_interruptible(md->eventq,
30276 - (event_nr != atomic_read(&md->event_nr)));
30277 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30278 }
30279
30280 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30281 diff -urNp linux-2.6.32.41/drivers/md/dm-ioctl.c linux-2.6.32.41/drivers/md/dm-ioctl.c
30282 --- linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30283 +++ linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30284 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30285 cmd == DM_LIST_VERSIONS_CMD)
30286 return 0;
30287
30288 - if ((cmd == DM_DEV_CREATE_CMD)) {
30289 + if (cmd == DM_DEV_CREATE_CMD) {
30290 if (!*param->name) {
30291 DMWARN("name not supplied when creating device");
30292 return -EINVAL;
30293 diff -urNp linux-2.6.32.41/drivers/md/dm-raid1.c linux-2.6.32.41/drivers/md/dm-raid1.c
30294 --- linux-2.6.32.41/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30295 +++ linux-2.6.32.41/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30296 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30297
30298 struct mirror {
30299 struct mirror_set *ms;
30300 - atomic_t error_count;
30301 + atomic_unchecked_t error_count;
30302 unsigned long error_type;
30303 struct dm_dev *dev;
30304 sector_t offset;
30305 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30306 * simple way to tell if a device has encountered
30307 * errors.
30308 */
30309 - atomic_inc(&m->error_count);
30310 + atomic_inc_unchecked(&m->error_count);
30311
30312 if (test_and_set_bit(error_type, &m->error_type))
30313 return;
30314 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30315 }
30316
30317 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30318 - if (!atomic_read(&new->error_count)) {
30319 + if (!atomic_read_unchecked(&new->error_count)) {
30320 set_default_mirror(new);
30321 break;
30322 }
30323 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30324 struct mirror *m = get_default_mirror(ms);
30325
30326 do {
30327 - if (likely(!atomic_read(&m->error_count)))
30328 + if (likely(!atomic_read_unchecked(&m->error_count)))
30329 return m;
30330
30331 if (m-- == ms->mirror)
30332 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30333 {
30334 struct mirror *default_mirror = get_default_mirror(m->ms);
30335
30336 - return !atomic_read(&default_mirror->error_count);
30337 + return !atomic_read_unchecked(&default_mirror->error_count);
30338 }
30339
30340 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30341 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30342 */
30343 if (likely(region_in_sync(ms, region, 1)))
30344 m = choose_mirror(ms, bio->bi_sector);
30345 - else if (m && atomic_read(&m->error_count))
30346 + else if (m && atomic_read_unchecked(&m->error_count))
30347 m = NULL;
30348
30349 if (likely(m))
30350 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30351 }
30352
30353 ms->mirror[mirror].ms = ms;
30354 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30355 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30356 ms->mirror[mirror].error_type = 0;
30357 ms->mirror[mirror].offset = offset;
30358
30359 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30360 */
30361 static char device_status_char(struct mirror *m)
30362 {
30363 - if (!atomic_read(&(m->error_count)))
30364 + if (!atomic_read_unchecked(&(m->error_count)))
30365 return 'A';
30366
30367 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30368 diff -urNp linux-2.6.32.41/drivers/md/dm-stripe.c linux-2.6.32.41/drivers/md/dm-stripe.c
30369 --- linux-2.6.32.41/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30370 +++ linux-2.6.32.41/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30371 @@ -20,7 +20,7 @@ struct stripe {
30372 struct dm_dev *dev;
30373 sector_t physical_start;
30374
30375 - atomic_t error_count;
30376 + atomic_unchecked_t error_count;
30377 };
30378
30379 struct stripe_c {
30380 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30381 kfree(sc);
30382 return r;
30383 }
30384 - atomic_set(&(sc->stripe[i].error_count), 0);
30385 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30386 }
30387
30388 ti->private = sc;
30389 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30390 DMEMIT("%d ", sc->stripes);
30391 for (i = 0; i < sc->stripes; i++) {
30392 DMEMIT("%s ", sc->stripe[i].dev->name);
30393 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30394 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30395 'D' : 'A';
30396 }
30397 buffer[i] = '\0';
30398 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30399 */
30400 for (i = 0; i < sc->stripes; i++)
30401 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30402 - atomic_inc(&(sc->stripe[i].error_count));
30403 - if (atomic_read(&(sc->stripe[i].error_count)) <
30404 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30405 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30406 DM_IO_ERROR_THRESHOLD)
30407 queue_work(kstriped, &sc->kstriped_ws);
30408 }
30409 diff -urNp linux-2.6.32.41/drivers/md/dm-sysfs.c linux-2.6.32.41/drivers/md/dm-sysfs.c
30410 --- linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30411 +++ linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30412 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30413 NULL,
30414 };
30415
30416 -static struct sysfs_ops dm_sysfs_ops = {
30417 +static const struct sysfs_ops dm_sysfs_ops = {
30418 .show = dm_attr_show,
30419 };
30420
30421 diff -urNp linux-2.6.32.41/drivers/md/dm-table.c linux-2.6.32.41/drivers/md/dm-table.c
30422 --- linux-2.6.32.41/drivers/md/dm-table.c 2011-03-27 14:31:47.000000000 -0400
30423 +++ linux-2.6.32.41/drivers/md/dm-table.c 2011-04-17 15:56:46.000000000 -0400
30424 @@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
30425 if (!dev_size)
30426 return 0;
30427
30428 - if ((start >= dev_size) || (start + len > dev_size)) {
30429 + if ((start >= dev_size) || (len > dev_size - start)) {
30430 DMWARN("%s: %s too small for target: "
30431 "start=%llu, len=%llu, dev_size=%llu",
30432 dm_device_name(ti->table->md), bdevname(bdev, b),
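The dm-table.c hunk above rewrites the range check "(start >= dev_size) || (start + len > dev_size)" as "(start >= dev_size) || (len > dev_size - start)". With unsigned sector counts, start + len can wrap and compare as a small value, letting an out-of-range target slip through; subtracting only after start < dev_size is known cannot underflow. A minimal stand-alone sketch of the same idea (types, function names and values below are illustrative, not taken from the kernel):

#include <stdint.h>
#include <stdio.h>

/* Overflow-prone form: start + len can wrap around UINT64_MAX. */
static int area_invalid_old(uint64_t start, uint64_t len, uint64_t dev_size)
{
	return (start >= dev_size) || (start + len > dev_size);
}

/* Wrap-safe form used by the patch: dev_size - start cannot underflow
 * because start < dev_size has already been established. */
static int area_invalid_new(uint64_t start, uint64_t len, uint64_t dev_size)
{
	return (start >= dev_size) || (len > dev_size - start);
}

int main(void)
{
	uint64_t dev_size = 1000, start = 10, len = UINT64_MAX - 5;

	/* The old test wraps (10 + len == 4) and reports the area as valid. */
	printf("old: %d  new: %d\n",
	       area_invalid_old(start, len, dev_size),
	       area_invalid_new(start, len, dev_size));
	return 0;
}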
30433 diff -urNp linux-2.6.32.41/drivers/md/md.c linux-2.6.32.41/drivers/md/md.c
30434 --- linux-2.6.32.41/drivers/md/md.c 2011-03-27 14:31:47.000000000 -0400
30435 +++ linux-2.6.32.41/drivers/md/md.c 2011-05-04 17:56:20.000000000 -0400
30436 @@ -153,10 +153,10 @@ static int start_readonly;
30437 * start build, activate spare
30438 */
30439 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30440 -static atomic_t md_event_count;
30441 +static atomic_unchecked_t md_event_count;
30442 void md_new_event(mddev_t *mddev)
30443 {
30444 - atomic_inc(&md_event_count);
30445 + atomic_inc_unchecked(&md_event_count);
30446 wake_up(&md_event_waiters);
30447 }
30448 EXPORT_SYMBOL_GPL(md_new_event);
30449 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30450 */
30451 static void md_new_event_inintr(mddev_t *mddev)
30452 {
30453 - atomic_inc(&md_event_count);
30454 + atomic_inc_unchecked(&md_event_count);
30455 wake_up(&md_event_waiters);
30456 }
30457
30458 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30459
30460 rdev->preferred_minor = 0xffff;
30461 rdev->data_offset = le64_to_cpu(sb->data_offset);
30462 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30463 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30464
30465 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30466 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30467 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30468 else
30469 sb->resync_offset = cpu_to_le64(0);
30470
30471 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30472 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30473
30474 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30475 sb->size = cpu_to_le64(mddev->dev_sectors);
30476 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30477 static ssize_t
30478 errors_show(mdk_rdev_t *rdev, char *page)
30479 {
30480 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30481 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30482 }
30483
30484 static ssize_t
30485 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30486 char *e;
30487 unsigned long n = simple_strtoul(buf, &e, 10);
30488 if (*buf && (*e == 0 || *e == '\n')) {
30489 - atomic_set(&rdev->corrected_errors, n);
30490 + atomic_set_unchecked(&rdev->corrected_errors, n);
30491 return len;
30492 }
30493 return -EINVAL;
30494 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30495 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30496 kfree(rdev);
30497 }
30498 -static struct sysfs_ops rdev_sysfs_ops = {
30499 +static const struct sysfs_ops rdev_sysfs_ops = {
30500 .show = rdev_attr_show,
30501 .store = rdev_attr_store,
30502 };
30503 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30504 rdev->data_offset = 0;
30505 rdev->sb_events = 0;
30506 atomic_set(&rdev->nr_pending, 0);
30507 - atomic_set(&rdev->read_errors, 0);
30508 - atomic_set(&rdev->corrected_errors, 0);
30509 + atomic_set_unchecked(&rdev->read_errors, 0);
30510 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30511
30512 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30513 if (!size) {
30514 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30515 kfree(mddev);
30516 }
30517
30518 -static struct sysfs_ops md_sysfs_ops = {
30519 +static const struct sysfs_ops md_sysfs_ops = {
30520 .show = md_attr_show,
30521 .store = md_attr_store,
30522 };
30523 @@ -4474,7 +4474,8 @@ out:
30524 err = 0;
30525 blk_integrity_unregister(disk);
30526 md_new_event(mddev);
30527 - sysfs_notify_dirent(mddev->sysfs_state);
30528 + if (mddev->sysfs_state)
30529 + sysfs_notify_dirent(mddev->sysfs_state);
30530 return err;
30531 }
30532
30533 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30534
30535 spin_unlock(&pers_lock);
30536 seq_printf(seq, "\n");
30537 - mi->event = atomic_read(&md_event_count);
30538 + mi->event = atomic_read_unchecked(&md_event_count);
30539 return 0;
30540 }
30541 if (v == (void*)2) {
30542 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30543 chunk_kb ? "KB" : "B");
30544 if (bitmap->file) {
30545 seq_printf(seq, ", file: ");
30546 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30547 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30548 }
30549
30550 seq_printf(seq, "\n");
30551 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30552 else {
30553 struct seq_file *p = file->private_data;
30554 p->private = mi;
30555 - mi->event = atomic_read(&md_event_count);
30556 + mi->event = atomic_read_unchecked(&md_event_count);
30557 }
30558 return error;
30559 }
30560 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30561 /* always allow read */
30562 mask = POLLIN | POLLRDNORM;
30563
30564 - if (mi->event != atomic_read(&md_event_count))
30565 + if (mi->event != atomic_read_unchecked(&md_event_count))
30566 mask |= POLLERR | POLLPRI;
30567 return mask;
30568 }
30569 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30570 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30571 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30572 (int)part_stat_read(&disk->part0, sectors[1]) -
30573 - atomic_read(&disk->sync_io);
30574 + atomic_read_unchecked(&disk->sync_io);
30575 /* sync IO will cause sync_io to increase before the disk_stats
30576 * as sync_io is counted when a request starts, and
30577 * disk_stats is counted when it completes.
30578 diff -urNp linux-2.6.32.41/drivers/md/md.h linux-2.6.32.41/drivers/md/md.h
30579 --- linux-2.6.32.41/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30580 +++ linux-2.6.32.41/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30581 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30582 * only maintained for arrays that
30583 * support hot removal
30584 */
30585 - atomic_t read_errors; /* number of consecutive read errors that
30586 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30587 * we have tried to ignore.
30588 */
30589 - atomic_t corrected_errors; /* number of corrected read errors,
30590 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30591 * for reporting to userspace and storing
30592 * in superblock.
30593 */
30594 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30595
30596 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30597 {
30598 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30599 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30600 }
30601
30602 struct mdk_personality
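Most of the md/dm hunks in this stretch convert event and error counters from atomic_t to atomic_unchecked_t and switch their accessors to the *_unchecked variants (atomic_inc_unchecked, atomic_read_unchecked, atomic_set_unchecked, atomic_add_unchecked, and so on), all of which are provided by the PaX portion of this patch. The apparent intent is that these are purely informational counters whose wraparound is harmless, so they are opted out of the overflow checking applied to ordinary atomic_t reference counts elsewhere in the patch. A sketch of the pattern only (not compilable on a stock kernel; the struct and field names are invented):

struct example_stats {
	atomic_unchecked_t error_count;	/* informational, wraparound is harmless */
	atomic_t           refcount;	/* a real reference count stays checked  */
};

static inline void example_note_error(struct example_stats *s)
{
	atomic_inc_unchecked(&s->error_count);
}

static inline int example_has_errors(struct example_stats *s)
{
	return atomic_read_unchecked(&s->error_count) != 0;
}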
30603 diff -urNp linux-2.6.32.41/drivers/md/raid10.c linux-2.6.32.41/drivers/md/raid10.c
30604 --- linux-2.6.32.41/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30605 +++ linux-2.6.32.41/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30606 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30607 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30608 set_bit(R10BIO_Uptodate, &r10_bio->state);
30609 else {
30610 - atomic_add(r10_bio->sectors,
30611 + atomic_add_unchecked(r10_bio->sectors,
30612 &conf->mirrors[d].rdev->corrected_errors);
30613 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30614 md_error(r10_bio->mddev,
30615 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30616 test_bit(In_sync, &rdev->flags)) {
30617 atomic_inc(&rdev->nr_pending);
30618 rcu_read_unlock();
30619 - atomic_add(s, &rdev->corrected_errors);
30620 + atomic_add_unchecked(s, &rdev->corrected_errors);
30621 if (sync_page_io(rdev->bdev,
30622 r10_bio->devs[sl].addr +
30623 sect + rdev->data_offset,
30624 diff -urNp linux-2.6.32.41/drivers/md/raid1.c linux-2.6.32.41/drivers/md/raid1.c
30625 --- linux-2.6.32.41/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30626 +++ linux-2.6.32.41/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30627 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30628 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30629 continue;
30630 rdev = conf->mirrors[d].rdev;
30631 - atomic_add(s, &rdev->corrected_errors);
30632 + atomic_add_unchecked(s, &rdev->corrected_errors);
30633 if (sync_page_io(rdev->bdev,
30634 sect + rdev->data_offset,
30635 s<<9,
30636 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30637 /* Well, this device is dead */
30638 md_error(mddev, rdev);
30639 else {
30640 - atomic_add(s, &rdev->corrected_errors);
30641 + atomic_add_unchecked(s, &rdev->corrected_errors);
30642 printk(KERN_INFO
30643 "raid1:%s: read error corrected "
30644 "(%d sectors at %llu on %s)\n",
30645 diff -urNp linux-2.6.32.41/drivers/md/raid5.c linux-2.6.32.41/drivers/md/raid5.c
30646 --- linux-2.6.32.41/drivers/md/raid5.c 2011-03-27 14:31:47.000000000 -0400
30647 +++ linux-2.6.32.41/drivers/md/raid5.c 2011-05-16 21:46:57.000000000 -0400
30648 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30649 bi->bi_next = NULL;
30650 if (rw == WRITE &&
30651 test_bit(R5_ReWrite, &sh->dev[i].flags))
30652 - atomic_add(STRIPE_SECTORS,
30653 + atomic_add_unchecked(STRIPE_SECTORS,
30654 &rdev->corrected_errors);
30655 generic_make_request(bi);
30656 } else {
30657 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30658 clear_bit(R5_ReadError, &sh->dev[i].flags);
30659 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30660 }
30661 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30662 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30663 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30664 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30665 } else {
30666 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30667 int retry = 0;
30668 rdev = conf->disks[i].rdev;
30669
30670 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30671 - atomic_inc(&rdev->read_errors);
30672 + atomic_inc_unchecked(&rdev->read_errors);
30673 if (conf->mddev->degraded >= conf->max_degraded)
30674 printk_rl(KERN_WARNING
30675 "raid5:%s: read error not correctable "
30676 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30677 (unsigned long long)(sh->sector
30678 + rdev->data_offset),
30679 bdn);
30680 - else if (atomic_read(&rdev->read_errors)
30681 + else if (atomic_read_unchecked(&rdev->read_errors)
30682 > conf->max_nr_stripes)
30683 printk(KERN_WARNING
30684 "raid5:%s: Too many read errors, failing device %s.\n",
30685 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30686 sector_t r_sector;
30687 struct stripe_head sh2;
30688
30689 + pax_track_stack();
30690
30691 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30692 stripe = new_sector;
30693 diff -urNp linux-2.6.32.41/drivers/media/common/saa7146_hlp.c linux-2.6.32.41/drivers/media/common/saa7146_hlp.c
30694 --- linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30695 +++ linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30696 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30697
30698 int x[32], y[32], w[32], h[32];
30699
30700 + pax_track_stack();
30701 +
30702 /* clear out memory */
30703 memset(&line_list[0], 0x00, sizeof(u32)*32);
30704 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30705 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30706 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30707 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30708 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30709 u8 buf[HOST_LINK_BUF_SIZE];
30710 int i;
30711
30712 + pax_track_stack();
30713 +
30714 dprintk("%s\n", __func__);
30715
30716 /* check if we have space for a link buf in the rx_buffer */
30717 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30718 unsigned long timeout;
30719 int written;
30720
30721 + pax_track_stack();
30722 +
30723 dprintk("%s\n", __func__);
30724
30725 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30726 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c
30727 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30728 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30729 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30730 const struct dvb_device *template, void *priv, int type)
30731 {
30732 struct dvb_device *dvbdev;
30733 + /* cannot be const */
30734 struct file_operations *dvbdevfops;
30735 struct device *clsdev;
30736 int minor;
30737 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c
30738 --- linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30739 +++ linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30740 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30741
30742 u8 buf[260];
30743
30744 + pax_track_stack();
30745 +
30746 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30747 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30748
30749 diff -urNp linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c
30750 --- linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30751 +++ linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30752 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30753 u8 tudata[585];
30754 int i;
30755
30756 + pax_track_stack();
30757 +
30758 dprintk("Firmware is %zd bytes\n",fw->size);
30759
30760 /* Get eprom data */
30761 diff -urNp linux-2.6.32.41/drivers/media/radio/radio-cadet.c linux-2.6.32.41/drivers/media/radio/radio-cadet.c
30762 --- linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30763 +++ linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30764 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30765 while (i < count && dev->rdsin != dev->rdsout)
30766 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30767
30768 - if (copy_to_user(data, readbuf, i))
30769 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30770 return -EFAULT;
30771 return i;
30772 }
30773 diff -urNp linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c
30774 --- linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30775 +++ linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30776 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30777
30778 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30779
30780 -static atomic_t cx18_instance = ATOMIC_INIT(0);
30781 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30782
30783 /* Parameter declarations */
30784 static int cardtype[CX18_MAX_CARDS];
30785 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30786 struct i2c_client c;
30787 u8 eedata[256];
30788
30789 + pax_track_stack();
30790 +
30791 memset(&c, 0, sizeof(c));
30792 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30793 c.adapter = &cx->i2c_adap[0];
30794 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30795 struct cx18 *cx;
30796
30797 /* FIXME - module parameter arrays constrain max instances */
30798 - i = atomic_inc_return(&cx18_instance) - 1;
30799 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30800 if (i >= CX18_MAX_CARDS) {
30801 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30802 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30803 diff -urNp linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c
30804 --- linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30805 +++ linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30806 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30807 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30808
30809 /* ivtv instance counter */
30810 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
30811 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30812
30813 /* Parameter declarations */
30814 static int cardtype[IVTV_MAX_CARDS];
30815 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.c linux-2.6.32.41/drivers/media/video/omap24xxcam.c
30816 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30817 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30818 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30819 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30820
30821 do_gettimeofday(&vb->ts);
30822 - vb->field_count = atomic_add_return(2, &fh->field_count);
30823 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30824 if (csr & csr_error) {
30825 vb->state = VIDEOBUF_ERROR;
30826 if (!atomic_read(&fh->cam->in_reset)) {
30827 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.h linux-2.6.32.41/drivers/media/video/omap24xxcam.h
30828 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30829 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30830 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30831 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30832 struct videobuf_queue vbq;
30833 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30834 - atomic_t field_count; /* field counter for videobuf_buffer */
30835 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
30836 /* accessing cam here doesn't need serialisation: it's constant */
30837 struct omap24xxcam_device *cam;
30838 };
30839 diff -urNp linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30840 --- linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
30841 +++ linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
30842 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30843 u8 *eeprom;
30844 struct tveeprom tvdata;
30845
30846 + pax_track_stack();
30847 +
30848 memset(&tvdata,0,sizeof(tvdata));
30849
30850 eeprom = pvr2_eeprom_fetch(hdw);
30851 diff -urNp linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c
30852 --- linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
30853 +++ linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
30854 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
30855 unsigned char localPAT[256];
30856 unsigned char localPMT[256];
30857
30858 + pax_track_stack();
30859 +
30860 /* Set video format - must be done first as it resets other settings */
30861 set_reg8(client, 0x41, h->video_format);
30862
30863 diff -urNp linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c
30864 --- linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
30865 +++ linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
30866 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30867 wait_queue_head_t *q = 0;
30868 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30869
30870 + pax_track_stack();
30871 +
30872 /* While any outstand message on the bus exists... */
30873 do {
30874
30875 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30876 u8 tmp[512];
30877 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30878
30879 + pax_track_stack();
30880 +
30881 while (loop) {
30882
30883 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
30884 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c
30885 --- linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
30886 +++ linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
30887 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
30888 int error;
30889
30890 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30891 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30892 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30893
30894 cam->input = input_dev = input_allocate_device();
30895 if (!input_dev) {
30896 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c
30897 --- linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
30898 +++ linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
30899 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
30900 int error;
30901
30902 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30903 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30904 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30905
30906 cam->input = input_dev = input_allocate_device();
30907 if (!input_dev) {
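The two usbvideo hunks above replace strncat(cam->input_physname, "/input0", sizeof(cam->input_physname)) with strlcat(). strncat()'s third argument is the maximum number of characters to append, not the size of the destination, so passing sizeof(dst) can write past the end of a buffer that already holds the USB path; strlcat() takes the total destination size and truncates instead. A stand-alone illustration (demo_strlcat is a minimal reimplementation so the example builds without BSD strlcat; the buffer contents are invented):

#include <stdio.h>
#include <string.h>

/* Minimal strlcat-style helper: appends with truncation given the *total*
 * size of dst, returning the length it tried to create. */
static size_t demo_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);

	if (size > dlen + 1)
		strncat(dst, src, size - dlen - 1);	/* leave room for the NUL */
	return dlen + strlen(src);
}

int main(void)
{
	char phys[16] = "usb-0000:00:1d";	/* 14 chars + NUL, nearly full */

	/* Wrong: strncat(phys, "/input0", sizeof(phys)) could append up to
	 * sizeof(phys) extra bytes plus a NUL and overflow phys.            */

	/* Right: bounded by the total buffer size, the result is truncated. */
	demo_strlcat(phys, "/input0", sizeof(phys));
	printf("%s\n", phys);	/* prints "usb-0000:00:1d/" */
	return 0;
}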
30908 diff -urNp linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c
30909 --- linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
30910 +++ linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
30911 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
30912 unsigned char rv, gv, bv;
30913 static unsigned char *Y, *U, *V;
30914
30915 + pax_track_stack();
30916 +
30917 frame = usbvision->curFrame;
30918 imageSize = frame->frmwidth * frame->frmheight;
30919 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30920 diff -urNp linux-2.6.32.41/drivers/media/video/v4l2-device.c linux-2.6.32.41/drivers/media/video/v4l2-device.c
30921 --- linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
30922 +++ linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
30923 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
30924 EXPORT_SYMBOL_GPL(v4l2_device_register);
30925
30926 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
30927 - atomic_t *instance)
30928 + atomic_unchecked_t *instance)
30929 {
30930 - int num = atomic_inc_return(instance) - 1;
30931 + int num = atomic_inc_return_unchecked(instance) - 1;
30932 int len = strlen(basename);
30933
30934 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
30935 diff -urNp linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c
30936 --- linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
30937 +++ linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
30938 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
30939 {
30940 struct videobuf_queue q;
30941
30942 + pax_track_stack();
30943 +
30944 /* Required to make generic handler to call __videobuf_alloc */
30945 q.int_ops = &sg_ops;
30946
30947 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptbase.c linux-2.6.32.41/drivers/message/fusion/mptbase.c
30948 --- linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
30949 +++ linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
30950 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
30951 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30952 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30953
30954 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30955 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30956 + NULL, NULL);
30957 +#else
30958 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30959 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30960 +#endif
30961 +
30962 /*
30963 * Rounding UP to nearest 4-kB boundary here...
30964 */
30965 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptsas.c linux-2.6.32.41/drivers/message/fusion/mptsas.c
30966 --- linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
30967 +++ linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
30968 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
30969 return 0;
30970 }
30971
30972 +static inline void
30973 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30974 +{
30975 + if (phy_info->port_details) {
30976 + phy_info->port_details->rphy = rphy;
30977 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30978 + ioc->name, rphy));
30979 + }
30980 +
30981 + if (rphy) {
30982 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30983 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30984 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30985 + ioc->name, rphy, rphy->dev.release));
30986 + }
30987 +}
30988 +
30989 /* no mutex */
30990 static void
30991 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30992 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30993 return NULL;
30994 }
30995
30996 -static inline void
30997 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30998 -{
30999 - if (phy_info->port_details) {
31000 - phy_info->port_details->rphy = rphy;
31001 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31002 - ioc->name, rphy));
31003 - }
31004 -
31005 - if (rphy) {
31006 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31007 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31008 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31009 - ioc->name, rphy, rphy->dev.release));
31010 - }
31011 -}
31012 -
31013 static inline struct sas_port *
31014 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31015 {
31016 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptscsih.c linux-2.6.32.41/drivers/message/fusion/mptscsih.c
31017 --- linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31018 +++ linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31019 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31020
31021 h = shost_priv(SChost);
31022
31023 - if (h) {
31024 - if (h->info_kbuf == NULL)
31025 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31026 - return h->info_kbuf;
31027 - h->info_kbuf[0] = '\0';
31028 + if (!h)
31029 + return NULL;
31030
31031 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31032 - h->info_kbuf[size-1] = '\0';
31033 - }
31034 + if (h->info_kbuf == NULL)
31035 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31036 + return h->info_kbuf;
31037 + h->info_kbuf[0] = '\0';
31038 +
31039 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31040 + h->info_kbuf[size-1] = '\0';
31041
31042 return h->info_kbuf;
31043 }
31044 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_config.c linux-2.6.32.41/drivers/message/i2o/i2o_config.c
31045 --- linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31046 +++ linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31047 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31048 struct i2o_message *msg;
31049 unsigned int iop;
31050
31051 + pax_track_stack();
31052 +
31053 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31054 return -EFAULT;
31055
31056 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_proc.c linux-2.6.32.41/drivers/message/i2o/i2o_proc.c
31057 --- linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31058 +++ linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31059 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31060 "Array Controller Device"
31061 };
31062
31063 -static char *chtostr(u8 * chars, int n)
31064 -{
31065 - char tmp[256];
31066 - tmp[0] = 0;
31067 - return strncat(tmp, (char *)chars, n);
31068 -}
31069 -
31070 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31071 char *group)
31072 {
31073 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31074
31075 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31076 seq_printf(seq, "%-#8x", ddm_table.module_id);
31077 - seq_printf(seq, "%-29s",
31078 - chtostr(ddm_table.module_name_version, 28));
31079 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31080 seq_printf(seq, "%9d ", ddm_table.data_size);
31081 seq_printf(seq, "%8d", ddm_table.code_size);
31082
31083 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31084
31085 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31086 seq_printf(seq, "%-#8x", dst->module_id);
31087 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31088 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31089 + seq_printf(seq, "%-.28s", dst->module_name_version);
31090 + seq_printf(seq, "%-.8s", dst->date);
31091 seq_printf(seq, "%8d ", dst->module_size);
31092 seq_printf(seq, "%8d ", dst->mpb_size);
31093 seq_printf(seq, "0x%04x", dst->module_flags);
31094 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31095 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31096 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31097 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31098 - seq_printf(seq, "Vendor info : %s\n",
31099 - chtostr((u8 *) (work32 + 2), 16));
31100 - seq_printf(seq, "Product info : %s\n",
31101 - chtostr((u8 *) (work32 + 6), 16));
31102 - seq_printf(seq, "Description : %s\n",
31103 - chtostr((u8 *) (work32 + 10), 16));
31104 - seq_printf(seq, "Product rev. : %s\n",
31105 - chtostr((u8 *) (work32 + 14), 8));
31106 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31107 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31108 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31109 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31110
31111 seq_printf(seq, "Serial number : ");
31112 print_serial_number(seq, (u8 *) (work32 + 16),
31113 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31114 }
31115
31116 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31117 - seq_printf(seq, "Module name : %s\n",
31118 - chtostr(result.module_name, 24));
31119 - seq_printf(seq, "Module revision : %s\n",
31120 - chtostr(result.module_rev, 8));
31121 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31122 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31123
31124 seq_printf(seq, "Serial number : ");
31125 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31126 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31127 return 0;
31128 }
31129
31130 - seq_printf(seq, "Device name : %s\n",
31131 - chtostr(result.device_name, 64));
31132 - seq_printf(seq, "Service name : %s\n",
31133 - chtostr(result.service_name, 64));
31134 - seq_printf(seq, "Physical name : %s\n",
31135 - chtostr(result.physical_location, 64));
31136 - seq_printf(seq, "Instance number : %s\n",
31137 - chtostr(result.instance_number, 4));
31138 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31139 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31140 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31141 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31142
31143 return 0;
31144 }
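The i2o_proc.c hunks remove the chtostr() helper, which strncat()ed up to n bytes of a field onto a 256-byte array on its own stack and returned a pointer to that array (dangling once the function returned), and instead print the fixed-width, not-necessarily-NUL-terminated fields directly with a precision such as "%.28s". A precision on %s caps both how many bytes are read and how many are printed, so no terminator is needed as long as the array is at least that large. Stand-alone illustration (the field contents are invented):

#include <stdio.h>

int main(void)
{
	/* Exactly 28 characters: in C this initializer fills the array with
	 * no terminating NUL, like a fixed-width field in an I2O table. */
	char module_name_version[28] = "demo-ddm v1.0 no terminator!";

	/* The precision limits the read to 28 bytes, so the missing NUL is
	 * harmless. */
	printf("%-.28s\n", module_name_version);
	return 0;
}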
31145 diff -urNp linux-2.6.32.41/drivers/message/i2o/iop.c linux-2.6.32.41/drivers/message/i2o/iop.c
31146 --- linux-2.6.32.41/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31147 +++ linux-2.6.32.41/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31148 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31149
31150 spin_lock_irqsave(&c->context_list_lock, flags);
31151
31152 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31153 - atomic_inc(&c->context_list_counter);
31154 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31155 + atomic_inc_unchecked(&c->context_list_counter);
31156
31157 - entry->context = atomic_read(&c->context_list_counter);
31158 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31159
31160 list_add(&entry->list, &c->context_list);
31161
31162 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31163
31164 #if BITS_PER_LONG == 64
31165 spin_lock_init(&c->context_list_lock);
31166 - atomic_set(&c->context_list_counter, 0);
31167 + atomic_set_unchecked(&c->context_list_counter, 0);
31168 INIT_LIST_HEAD(&c->context_list);
31169 #endif
31170
31171 diff -urNp linux-2.6.32.41/drivers/mfd/wm8350-i2c.c linux-2.6.32.41/drivers/mfd/wm8350-i2c.c
31172 --- linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31173 +++ linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31174 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31175 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31176 int ret;
31177
31178 + pax_track_stack();
31179 +
31180 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31181 return -EINVAL;
31182
31183 diff -urNp linux-2.6.32.41/drivers/misc/kgdbts.c linux-2.6.32.41/drivers/misc/kgdbts.c
31184 --- linux-2.6.32.41/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31185 +++ linux-2.6.32.41/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31186 @@ -118,7 +118,7 @@
31187 } while (0)
31188 #define MAX_CONFIG_LEN 40
31189
31190 -static struct kgdb_io kgdbts_io_ops;
31191 +static const struct kgdb_io kgdbts_io_ops;
31192 static char get_buf[BUFMAX];
31193 static int get_buf_cnt;
31194 static char put_buf[BUFMAX];
31195 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31196 module_put(THIS_MODULE);
31197 }
31198
31199 -static struct kgdb_io kgdbts_io_ops = {
31200 +static const struct kgdb_io kgdbts_io_ops = {
31201 .name = "kgdbts",
31202 .read_char = kgdbts_get_char,
31203 .write_char = kgdbts_put_char,
31204 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c
31205 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31206 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31207 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31208
31209 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31210 {
31211 - atomic_long_inc(&mcs_op_statistics[op].count);
31212 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31213 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31214 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31215 if (mcs_op_statistics[op].max < clks)
31216 mcs_op_statistics[op].max = clks;
31217 }
31218 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c
31219 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31220 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31221 @@ -32,9 +32,9 @@
31222
31223 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31224
31225 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31226 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31227 {
31228 - unsigned long val = atomic_long_read(v);
31229 + unsigned long val = atomic_long_read_unchecked(v);
31230
31231 if (val)
31232 seq_printf(s, "%16lu %s\n", val, id);
31233 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31234 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31235
31236 for (op = 0; op < mcsop_last; op++) {
31237 - count = atomic_long_read(&mcs_op_statistics[op].count);
31238 - total = atomic_long_read(&mcs_op_statistics[op].total);
31239 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31240 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31241 max = mcs_op_statistics[op].max;
31242 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31243 count ? total / count : 0, max);
31244 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h
31245 --- linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31246 +++ linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31247 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31248 * GRU statistics.
31249 */
31250 struct gru_stats_s {
31251 - atomic_long_t vdata_alloc;
31252 - atomic_long_t vdata_free;
31253 - atomic_long_t gts_alloc;
31254 - atomic_long_t gts_free;
31255 - atomic_long_t vdata_double_alloc;
31256 - atomic_long_t gts_double_allocate;
31257 - atomic_long_t assign_context;
31258 - atomic_long_t assign_context_failed;
31259 - atomic_long_t free_context;
31260 - atomic_long_t load_user_context;
31261 - atomic_long_t load_kernel_context;
31262 - atomic_long_t lock_kernel_context;
31263 - atomic_long_t unlock_kernel_context;
31264 - atomic_long_t steal_user_context;
31265 - atomic_long_t steal_kernel_context;
31266 - atomic_long_t steal_context_failed;
31267 - atomic_long_t nopfn;
31268 - atomic_long_t break_cow;
31269 - atomic_long_t asid_new;
31270 - atomic_long_t asid_next;
31271 - atomic_long_t asid_wrap;
31272 - atomic_long_t asid_reuse;
31273 - atomic_long_t intr;
31274 - atomic_long_t intr_mm_lock_failed;
31275 - atomic_long_t call_os;
31276 - atomic_long_t call_os_offnode_reference;
31277 - atomic_long_t call_os_check_for_bug;
31278 - atomic_long_t call_os_wait_queue;
31279 - atomic_long_t user_flush_tlb;
31280 - atomic_long_t user_unload_context;
31281 - atomic_long_t user_exception;
31282 - atomic_long_t set_context_option;
31283 - atomic_long_t migrate_check;
31284 - atomic_long_t migrated_retarget;
31285 - atomic_long_t migrated_unload;
31286 - atomic_long_t migrated_unload_delay;
31287 - atomic_long_t migrated_nopfn_retarget;
31288 - atomic_long_t migrated_nopfn_unload;
31289 - atomic_long_t tlb_dropin;
31290 - atomic_long_t tlb_dropin_fail_no_asid;
31291 - atomic_long_t tlb_dropin_fail_upm;
31292 - atomic_long_t tlb_dropin_fail_invalid;
31293 - atomic_long_t tlb_dropin_fail_range_active;
31294 - atomic_long_t tlb_dropin_fail_idle;
31295 - atomic_long_t tlb_dropin_fail_fmm;
31296 - atomic_long_t tlb_dropin_fail_no_exception;
31297 - atomic_long_t tlb_dropin_fail_no_exception_war;
31298 - atomic_long_t tfh_stale_on_fault;
31299 - atomic_long_t mmu_invalidate_range;
31300 - atomic_long_t mmu_invalidate_page;
31301 - atomic_long_t mmu_clear_flush_young;
31302 - atomic_long_t flush_tlb;
31303 - atomic_long_t flush_tlb_gru;
31304 - atomic_long_t flush_tlb_gru_tgh;
31305 - atomic_long_t flush_tlb_gru_zero_asid;
31306 -
31307 - atomic_long_t copy_gpa;
31308 -
31309 - atomic_long_t mesq_receive;
31310 - atomic_long_t mesq_receive_none;
31311 - atomic_long_t mesq_send;
31312 - atomic_long_t mesq_send_failed;
31313 - atomic_long_t mesq_noop;
31314 - atomic_long_t mesq_send_unexpected_error;
31315 - atomic_long_t mesq_send_lb_overflow;
31316 - atomic_long_t mesq_send_qlimit_reached;
31317 - atomic_long_t mesq_send_amo_nacked;
31318 - atomic_long_t mesq_send_put_nacked;
31319 - atomic_long_t mesq_qf_not_full;
31320 - atomic_long_t mesq_qf_locked;
31321 - atomic_long_t mesq_qf_noop_not_full;
31322 - atomic_long_t mesq_qf_switch_head_failed;
31323 - atomic_long_t mesq_qf_unexpected_error;
31324 - atomic_long_t mesq_noop_unexpected_error;
31325 - atomic_long_t mesq_noop_lb_overflow;
31326 - atomic_long_t mesq_noop_qlimit_reached;
31327 - atomic_long_t mesq_noop_amo_nacked;
31328 - atomic_long_t mesq_noop_put_nacked;
31329 + atomic_long_unchecked_t vdata_alloc;
31330 + atomic_long_unchecked_t vdata_free;
31331 + atomic_long_unchecked_t gts_alloc;
31332 + atomic_long_unchecked_t gts_free;
31333 + atomic_long_unchecked_t vdata_double_alloc;
31334 + atomic_long_unchecked_t gts_double_allocate;
31335 + atomic_long_unchecked_t assign_context;
31336 + atomic_long_unchecked_t assign_context_failed;
31337 + atomic_long_unchecked_t free_context;
31338 + atomic_long_unchecked_t load_user_context;
31339 + atomic_long_unchecked_t load_kernel_context;
31340 + atomic_long_unchecked_t lock_kernel_context;
31341 + atomic_long_unchecked_t unlock_kernel_context;
31342 + atomic_long_unchecked_t steal_user_context;
31343 + atomic_long_unchecked_t steal_kernel_context;
31344 + atomic_long_unchecked_t steal_context_failed;
31345 + atomic_long_unchecked_t nopfn;
31346 + atomic_long_unchecked_t break_cow;
31347 + atomic_long_unchecked_t asid_new;
31348 + atomic_long_unchecked_t asid_next;
31349 + atomic_long_unchecked_t asid_wrap;
31350 + atomic_long_unchecked_t asid_reuse;
31351 + atomic_long_unchecked_t intr;
31352 + atomic_long_unchecked_t intr_mm_lock_failed;
31353 + atomic_long_unchecked_t call_os;
31354 + atomic_long_unchecked_t call_os_offnode_reference;
31355 + atomic_long_unchecked_t call_os_check_for_bug;
31356 + atomic_long_unchecked_t call_os_wait_queue;
31357 + atomic_long_unchecked_t user_flush_tlb;
31358 + atomic_long_unchecked_t user_unload_context;
31359 + atomic_long_unchecked_t user_exception;
31360 + atomic_long_unchecked_t set_context_option;
31361 + atomic_long_unchecked_t migrate_check;
31362 + atomic_long_unchecked_t migrated_retarget;
31363 + atomic_long_unchecked_t migrated_unload;
31364 + atomic_long_unchecked_t migrated_unload_delay;
31365 + atomic_long_unchecked_t migrated_nopfn_retarget;
31366 + atomic_long_unchecked_t migrated_nopfn_unload;
31367 + atomic_long_unchecked_t tlb_dropin;
31368 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31369 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31370 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31371 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31372 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31373 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31374 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31375 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31376 + atomic_long_unchecked_t tfh_stale_on_fault;
31377 + atomic_long_unchecked_t mmu_invalidate_range;
31378 + atomic_long_unchecked_t mmu_invalidate_page;
31379 + atomic_long_unchecked_t mmu_clear_flush_young;
31380 + atomic_long_unchecked_t flush_tlb;
31381 + atomic_long_unchecked_t flush_tlb_gru;
31382 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31383 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31384 +
31385 + atomic_long_unchecked_t copy_gpa;
31386 +
31387 + atomic_long_unchecked_t mesq_receive;
31388 + atomic_long_unchecked_t mesq_receive_none;
31389 + atomic_long_unchecked_t mesq_send;
31390 + atomic_long_unchecked_t mesq_send_failed;
31391 + atomic_long_unchecked_t mesq_noop;
31392 + atomic_long_unchecked_t mesq_send_unexpected_error;
31393 + atomic_long_unchecked_t mesq_send_lb_overflow;
31394 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31395 + atomic_long_unchecked_t mesq_send_amo_nacked;
31396 + atomic_long_unchecked_t mesq_send_put_nacked;
31397 + atomic_long_unchecked_t mesq_qf_not_full;
31398 + atomic_long_unchecked_t mesq_qf_locked;
31399 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31400 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31401 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31402 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31403 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31404 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31405 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31406 + atomic_long_unchecked_t mesq_noop_put_nacked;
31407
31408 };
31409
31410 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31411 cchop_deallocate, tghop_invalidate, mcsop_last};
31412
31413 struct mcs_op_statistic {
31414 - atomic_long_t count;
31415 - atomic_long_t total;
31416 + atomic_long_unchecked_t count;
31417 + atomic_long_unchecked_t total;
31418 unsigned long max;
31419 };
31420
31421 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31422
31423 #define STAT(id) do { \
31424 if (gru_options & OPT_STATS) \
31425 - atomic_long_inc(&gru_stats.id); \
31426 + atomic_long_inc_unchecked(&gru_stats.id); \
31427 } while (0)
31428
31429 #ifdef CONFIG_SGI_GRU_DEBUG
31430 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c
31431 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31432 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31433 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31434 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31435 unsigned long timeo = jiffies + HZ;
31436
31437 + pax_track_stack();
31438 +
31439 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31440 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31441 goto sleep;
31442 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31443 unsigned long initial_adr;
31444 int initial_len = len;
31445
31446 + pax_track_stack();
31447 +
31448 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31449 adr += chip->start;
31450 initial_adr = adr;
31451 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31452 int retries = 3;
31453 int ret;
31454
31455 + pax_track_stack();
31456 +
31457 adr += chip->start;
31458
31459 retry:
31460 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c
31461 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31462 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31463 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31464 unsigned long cmd_addr;
31465 struct cfi_private *cfi = map->fldrv_priv;
31466
31467 + pax_track_stack();
31468 +
31469 adr += chip->start;
31470
31471 /* Ensure cmd read/writes are aligned. */
31472 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31473 DECLARE_WAITQUEUE(wait, current);
31474 int wbufsize, z;
31475
31476 + pax_track_stack();
31477 +
31478 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31479 if (adr & (map_bankwidth(map)-1))
31480 return -EINVAL;
31481 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31482 DECLARE_WAITQUEUE(wait, current);
31483 int ret = 0;
31484
31485 + pax_track_stack();
31486 +
31487 adr += chip->start;
31488
31489 /* Let's determine this according to the interleave only once */
31490 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31491 unsigned long timeo = jiffies + HZ;
31492 DECLARE_WAITQUEUE(wait, current);
31493
31494 + pax_track_stack();
31495 +
31496 adr += chip->start;
31497
31498 /* Let's determine this according to the interleave only once */
31499 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31500 unsigned long timeo = jiffies + HZ;
31501 DECLARE_WAITQUEUE(wait, current);
31502
31503 + pax_track_stack();
31504 +
31505 adr += chip->start;
31506
31507 /* Let's determine this according to the interleave only once */
31508 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2000.c linux-2.6.32.41/drivers/mtd/devices/doc2000.c
31509 --- linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31510 +++ linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31511 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31512
31513 /* The ECC will not be calculated correctly if less than 512 is written */
31514 /* DBB-
31515 - if (len != 0x200 && eccbuf)
31516 + if (len != 0x200)
31517 printk(KERN_WARNING
31518 "ECC needs a full sector write (adr: %lx size %lx)\n",
31519 (long) to, (long) len);
31520 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2001.c linux-2.6.32.41/drivers/mtd/devices/doc2001.c
31521 --- linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31522 +++ linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31523 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31524 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31525
31526 /* Don't allow read past end of device */
31527 - if (from >= this->totlen)
31528 + if (from >= this->totlen || !len)
31529 return -EINVAL;
31530
31531 /* Don't allow a single read to cross a 512-byte block boundary */
31532 diff -urNp linux-2.6.32.41/drivers/mtd/ftl.c linux-2.6.32.41/drivers/mtd/ftl.c
31533 --- linux-2.6.32.41/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31534 +++ linux-2.6.32.41/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31535 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31536 loff_t offset;
31537 uint16_t srcunitswap = cpu_to_le16(srcunit);
31538
31539 + pax_track_stack();
31540 +
31541 eun = &part->EUNInfo[srcunit];
31542 xfer = &part->XferInfo[xferunit];
31543 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31544 diff -urNp linux-2.6.32.41/drivers/mtd/inftlcore.c linux-2.6.32.41/drivers/mtd/inftlcore.c
31545 --- linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31546 +++ linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31547 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31548 struct inftl_oob oob;
31549 size_t retlen;
31550
31551 + pax_track_stack();
31552 +
31553 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31554 "pending=%d)\n", inftl, thisVUC, pendingblock);
31555
31556 diff -urNp linux-2.6.32.41/drivers/mtd/inftlmount.c linux-2.6.32.41/drivers/mtd/inftlmount.c
31557 --- linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31558 +++ linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31559 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31560 struct INFTLPartition *ip;
31561 size_t retlen;
31562
31563 + pax_track_stack();
31564 +
31565 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31566
31567 /*
31568 diff -urNp linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c
31569 --- linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31570 +++ linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31571 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31572 {
31573 map_word pfow_val[4];
31574
31575 + pax_track_stack();
31576 +
31577 /* Check identification string */
31578 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31579 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31580 diff -urNp linux-2.6.32.41/drivers/mtd/mtdchar.c linux-2.6.32.41/drivers/mtd/mtdchar.c
31581 --- linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31582 +++ linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31583 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31584 u_long size;
31585 struct mtd_info_user info;
31586
31587 + pax_track_stack();
31588 +
31589 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31590
31591 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31592 diff -urNp linux-2.6.32.41/drivers/mtd/nftlcore.c linux-2.6.32.41/drivers/mtd/nftlcore.c
31593 --- linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31594 +++ linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31595 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31596 int inplace = 1;
31597 size_t retlen;
31598
31599 + pax_track_stack();
31600 +
31601 memset(BlockMap, 0xff, sizeof(BlockMap));
31602 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31603
31604 diff -urNp linux-2.6.32.41/drivers/mtd/nftlmount.c linux-2.6.32.41/drivers/mtd/nftlmount.c
31605 --- linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31606 +++ linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31607 @@ -23,6 +23,7 @@
31608 #include <asm/errno.h>
31609 #include <linux/delay.h>
31610 #include <linux/slab.h>
31611 +#include <linux/sched.h>
31612 #include <linux/mtd/mtd.h>
31613 #include <linux/mtd/nand.h>
31614 #include <linux/mtd/nftl.h>
31615 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31616 struct mtd_info *mtd = nftl->mbd.mtd;
31617 unsigned int i;
31618
31619 + pax_track_stack();
31620 +
31621 /* Assume logical EraseSize == physical erasesize for starting the scan.
31622 We'll sort it out later if we find a MediaHeader which says otherwise */
31623 /* Actually, we won't. The new DiskOnChip driver has already scanned
31624 diff -urNp linux-2.6.32.41/drivers/mtd/ubi/build.c linux-2.6.32.41/drivers/mtd/ubi/build.c
31625 --- linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31626 +++ linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31627 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31628 static int __init bytes_str_to_int(const char *str)
31629 {
31630 char *endp;
31631 - unsigned long result;
31632 + unsigned long result, scale = 1;
31633
31634 result = simple_strtoul(str, &endp, 0);
31635 if (str == endp || result >= INT_MAX) {
31636 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31637
31638 switch (*endp) {
31639 case 'G':
31640 - result *= 1024;
31641 + scale *= 1024;
31642 case 'M':
31643 - result *= 1024;
31644 + scale *= 1024;
31645 case 'K':
31646 - result *= 1024;
31647 + scale *= 1024;
31648 if (endp[1] == 'i' && endp[2] == 'B')
31649 endp += 2;
31650 case '\0':
31651 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31652 return -EINVAL;
31653 }
31654
31655 - return result;
31656 + if ((intoverflow_t)result*scale >= INT_MAX) {
31657 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31658 + str);
31659 + return -EINVAL;
31660 + }
31661 +
31662 + return result*scale;
31663 }
31664
31665 /**
31666 diff -urNp linux-2.6.32.41/drivers/net/bnx2.c linux-2.6.32.41/drivers/net/bnx2.c
31667 --- linux-2.6.32.41/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31668 +++ linux-2.6.32.41/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31669 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31670 int rc = 0;
31671 u32 magic, csum;
31672
31673 + pax_track_stack();
31674 +
31675 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31676 goto test_nvram_done;
31677
31678 diff -urNp linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c
31679 --- linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31680 +++ linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31681 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31682 int i, addr, ret;
31683 struct t3_vpd vpd;
31684
31685 + pax_track_stack();
31686 +
31687 /*
31688 * Card information is normally at VPD_BASE but some early cards had
31689 * it at 0.
31690 diff -urNp linux-2.6.32.41/drivers/net/e1000e/82571.c linux-2.6.32.41/drivers/net/e1000e/82571.c
31691 --- linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31692 +++ linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31693 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31694 {
31695 struct e1000_hw *hw = &adapter->hw;
31696 struct e1000_mac_info *mac = &hw->mac;
31697 + /* cannot be const */
31698 struct e1000_mac_operations *func = &mac->ops;
31699 u32 swsm = 0;
31700 u32 swsm2 = 0;
31701 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31702 temp = er32(ICRXDMTC);
31703 }
31704
31705 -static struct e1000_mac_operations e82571_mac_ops = {
31706 +static const struct e1000_mac_operations e82571_mac_ops = {
31707 /* .check_mng_mode: mac type dependent */
31708 /* .check_for_link: media type dependent */
31709 .id_led_init = e1000e_id_led_init,
31710 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31711 .setup_led = e1000e_setup_led_generic,
31712 };
31713
31714 -static struct e1000_phy_operations e82_phy_ops_igp = {
31715 +static const struct e1000_phy_operations e82_phy_ops_igp = {
31716 .acquire_phy = e1000_get_hw_semaphore_82571,
31717 .check_reset_block = e1000e_check_reset_block_generic,
31718 .commit_phy = NULL,
31719 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31720 .cfg_on_link_up = NULL,
31721 };
31722
31723 -static struct e1000_phy_operations e82_phy_ops_m88 = {
31724 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
31725 .acquire_phy = e1000_get_hw_semaphore_82571,
31726 .check_reset_block = e1000e_check_reset_block_generic,
31727 .commit_phy = e1000e_phy_sw_reset,
31728 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31729 .cfg_on_link_up = NULL,
31730 };
31731
31732 -static struct e1000_phy_operations e82_phy_ops_bm = {
31733 +static const struct e1000_phy_operations e82_phy_ops_bm = {
31734 .acquire_phy = e1000_get_hw_semaphore_82571,
31735 .check_reset_block = e1000e_check_reset_block_generic,
31736 .commit_phy = e1000e_phy_sw_reset,
31737 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31738 .cfg_on_link_up = NULL,
31739 };
31740
31741 -static struct e1000_nvm_operations e82571_nvm_ops = {
31742 +static const struct e1000_nvm_operations e82571_nvm_ops = {
31743 .acquire_nvm = e1000_acquire_nvm_82571,
31744 .read_nvm = e1000e_read_nvm_eerd,
31745 .release_nvm = e1000_release_nvm_82571,
31746 diff -urNp linux-2.6.32.41/drivers/net/e1000e/e1000.h linux-2.6.32.41/drivers/net/e1000e/e1000.h
31747 --- linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31748 +++ linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31749 @@ -375,9 +375,9 @@ struct e1000_info {
31750 u32 pba;
31751 u32 max_hw_frame_size;
31752 s32 (*get_variants)(struct e1000_adapter *);
31753 - struct e1000_mac_operations *mac_ops;
31754 - struct e1000_phy_operations *phy_ops;
31755 - struct e1000_nvm_operations *nvm_ops;
31756 + const struct e1000_mac_operations *mac_ops;
31757 + const struct e1000_phy_operations *phy_ops;
31758 + const struct e1000_nvm_operations *nvm_ops;
31759 };
31760
31761 /* hardware capability, feature, and workaround flags */
31762 diff -urNp linux-2.6.32.41/drivers/net/e1000e/es2lan.c linux-2.6.32.41/drivers/net/e1000e/es2lan.c
31763 --- linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31764 +++ linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31765 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31766 {
31767 struct e1000_hw *hw = &adapter->hw;
31768 struct e1000_mac_info *mac = &hw->mac;
31769 + /* cannot be const */
31770 struct e1000_mac_operations *func = &mac->ops;
31771
31772 /* Set media type */
31773 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31774 temp = er32(ICRXDMTC);
31775 }
31776
31777 -static struct e1000_mac_operations es2_mac_ops = {
31778 +static const struct e1000_mac_operations es2_mac_ops = {
31779 .id_led_init = e1000e_id_led_init,
31780 .check_mng_mode = e1000e_check_mng_mode_generic,
31781 /* check_for_link dependent on media type */
31782 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31783 .setup_led = e1000e_setup_led_generic,
31784 };
31785
31786 -static struct e1000_phy_operations es2_phy_ops = {
31787 +static const struct e1000_phy_operations es2_phy_ops = {
31788 .acquire_phy = e1000_acquire_phy_80003es2lan,
31789 .check_reset_block = e1000e_check_reset_block_generic,
31790 .commit_phy = e1000e_phy_sw_reset,
31791 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31792 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31793 };
31794
31795 -static struct e1000_nvm_operations es2_nvm_ops = {
31796 +static const struct e1000_nvm_operations es2_nvm_ops = {
31797 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31798 .read_nvm = e1000e_read_nvm_eerd,
31799 .release_nvm = e1000_release_nvm_80003es2lan,
31800 diff -urNp linux-2.6.32.41/drivers/net/e1000e/hw.h linux-2.6.32.41/drivers/net/e1000e/hw.h
31801 --- linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31802 +++ linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31803 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
31804
31805 /* Function pointers for the PHY. */
31806 struct e1000_phy_operations {
31807 - s32 (*acquire_phy)(struct e1000_hw *);
31808 - s32 (*check_polarity)(struct e1000_hw *);
31809 - s32 (*check_reset_block)(struct e1000_hw *);
31810 - s32 (*commit_phy)(struct e1000_hw *);
31811 - s32 (*force_speed_duplex)(struct e1000_hw *);
31812 - s32 (*get_cfg_done)(struct e1000_hw *hw);
31813 - s32 (*get_cable_length)(struct e1000_hw *);
31814 - s32 (*get_phy_info)(struct e1000_hw *);
31815 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31816 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31817 - void (*release_phy)(struct e1000_hw *);
31818 - s32 (*reset_phy)(struct e1000_hw *);
31819 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31820 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31821 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31822 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31823 - s32 (*cfg_on_link_up)(struct e1000_hw *);
31824 + s32 (* acquire_phy)(struct e1000_hw *);
31825 + s32 (* check_polarity)(struct e1000_hw *);
31826 + s32 (* check_reset_block)(struct e1000_hw *);
31827 + s32 (* commit_phy)(struct e1000_hw *);
31828 + s32 (* force_speed_duplex)(struct e1000_hw *);
31829 + s32 (* get_cfg_done)(struct e1000_hw *hw);
31830 + s32 (* get_cable_length)(struct e1000_hw *);
31831 + s32 (* get_phy_info)(struct e1000_hw *);
31832 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31833 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31834 + void (* release_phy)(struct e1000_hw *);
31835 + s32 (* reset_phy)(struct e1000_hw *);
31836 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
31837 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
31838 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
31839 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31840 + s32 (* cfg_on_link_up)(struct e1000_hw *);
31841 };
31842
31843 /* Function pointers for the NVM. */
31844 struct e1000_nvm_operations {
31845 - s32 (*acquire_nvm)(struct e1000_hw *);
31846 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31847 - void (*release_nvm)(struct e1000_hw *);
31848 - s32 (*update_nvm)(struct e1000_hw *);
31849 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
31850 - s32 (*validate_nvm)(struct e1000_hw *);
31851 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31852 + s32 (* const acquire_nvm)(struct e1000_hw *);
31853 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31854 + void (* const release_nvm)(struct e1000_hw *);
31855 + s32 (* const update_nvm)(struct e1000_hw *);
31856 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
31857 + s32 (* const validate_nvm)(struct e1000_hw *);
31858 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31859 };
31860
31861 struct e1000_mac_info {
31862 diff -urNp linux-2.6.32.41/drivers/net/e1000e/ich8lan.c linux-2.6.32.41/drivers/net/e1000e/ich8lan.c
31863 --- linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
31864 +++ linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
31865 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
31866 }
31867 }
31868
31869 -static struct e1000_mac_operations ich8_mac_ops = {
31870 +static const struct e1000_mac_operations ich8_mac_ops = {
31871 .id_led_init = e1000e_id_led_init,
31872 .check_mng_mode = e1000_check_mng_mode_ich8lan,
31873 .check_for_link = e1000_check_for_copper_link_ich8lan,
31874 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
31875 /* id_led_init dependent on mac type */
31876 };
31877
31878 -static struct e1000_phy_operations ich8_phy_ops = {
31879 +static const struct e1000_phy_operations ich8_phy_ops = {
31880 .acquire_phy = e1000_acquire_swflag_ich8lan,
31881 .check_reset_block = e1000_check_reset_block_ich8lan,
31882 .commit_phy = NULL,
31883 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
31884 .write_phy_reg = e1000e_write_phy_reg_igp,
31885 };
31886
31887 -static struct e1000_nvm_operations ich8_nvm_ops = {
31888 +static const struct e1000_nvm_operations ich8_nvm_ops = {
31889 .acquire_nvm = e1000_acquire_nvm_ich8lan,
31890 .read_nvm = e1000_read_nvm_ich8lan,
31891 .release_nvm = e1000_release_nvm_ich8lan,
31892 diff -urNp linux-2.6.32.41/drivers/net/hamradio/6pack.c linux-2.6.32.41/drivers/net/hamradio/6pack.c
31893 --- linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
31894 +++ linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
31895 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
31896 unsigned char buf[512];
31897 int count1;
31898
31899 + pax_track_stack();
31900 +
31901 if (!count)
31902 return;
31903
31904 diff -urNp linux-2.6.32.41/drivers/net/ibmveth.c linux-2.6.32.41/drivers/net/ibmveth.c
31905 --- linux-2.6.32.41/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
31906 +++ linux-2.6.32.41/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
31907 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
31908 NULL,
31909 };
31910
31911 -static struct sysfs_ops veth_pool_ops = {
31912 +static const struct sysfs_ops veth_pool_ops = {
31913 .show = veth_pool_show,
31914 .store = veth_pool_store,
31915 };
31916 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_82575.c linux-2.6.32.41/drivers/net/igb/e1000_82575.c
31917 --- linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
31918 +++ linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
31919 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
31920 wr32(E1000_VT_CTL, vt_ctl);
31921 }
31922
31923 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
31924 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
31925 .reset_hw = igb_reset_hw_82575,
31926 .init_hw = igb_init_hw_82575,
31927 .check_for_link = igb_check_for_link_82575,
31928 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
31929 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
31930 };
31931
31932 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
31933 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
31934 .acquire = igb_acquire_phy_82575,
31935 .get_cfg_done = igb_get_cfg_done_82575,
31936 .release = igb_release_phy_82575,
31937 };
31938
31939 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31940 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31941 .acquire = igb_acquire_nvm_82575,
31942 .read = igb_read_nvm_eerd,
31943 .release = igb_release_nvm_82575,
31944 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_hw.h linux-2.6.32.41/drivers/net/igb/e1000_hw.h
31945 --- linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
31946 +++ linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
31947 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
31948 };
31949
31950 struct e1000_nvm_operations {
31951 - s32 (*acquire)(struct e1000_hw *);
31952 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
31953 - void (*release)(struct e1000_hw *);
31954 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31955 + s32 (* const acquire)(struct e1000_hw *);
31956 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
31957 + void (* const release)(struct e1000_hw *);
31958 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
31959 };
31960
31961 struct e1000_info {
31962 s32 (*get_invariants)(struct e1000_hw *);
31963 - struct e1000_mac_operations *mac_ops;
31964 - struct e1000_phy_operations *phy_ops;
31965 - struct e1000_nvm_operations *nvm_ops;
31966 + const struct e1000_mac_operations *mac_ops;
31967 + const struct e1000_phy_operations *phy_ops;
31968 + const struct e1000_nvm_operations *nvm_ops;
31969 };
31970
31971 extern const struct e1000_info e1000_82575_info;
31972 diff -urNp linux-2.6.32.41/drivers/net/iseries_veth.c linux-2.6.32.41/drivers/net/iseries_veth.c
31973 --- linux-2.6.32.41/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
31974 +++ linux-2.6.32.41/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
31975 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
31976 NULL
31977 };
31978
31979 -static struct sysfs_ops veth_cnx_sysfs_ops = {
31980 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
31981 .show = veth_cnx_attribute_show
31982 };
31983
31984 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
31985 NULL
31986 };
31987
31988 -static struct sysfs_ops veth_port_sysfs_ops = {
31989 +static const struct sysfs_ops veth_port_sysfs_ops = {
31990 .show = veth_port_attribute_show
31991 };
31992
31993 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c
31994 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
31995 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
31996 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
31997 u32 rctl;
31998 int i;
31999
32000 + pax_track_stack();
32001 +
32002 /* Check for Promiscuous and All Multicast modes */
32003
32004 rctl = IXGB_READ_REG(hw, RCTL);
32005 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c
32006 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32007 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32008 @@ -260,6 +260,9 @@ void __devinit
32009 ixgb_check_options(struct ixgb_adapter *adapter)
32010 {
32011 int bd = adapter->bd_number;
32012 +
32013 + pax_track_stack();
32014 +
32015 if (bd >= IXGB_MAX_NIC) {
32016 printk(KERN_NOTICE
32017 "Warning: no configuration for board #%i\n", bd);
32018 diff -urNp linux-2.6.32.41/drivers/net/mlx4/main.c linux-2.6.32.41/drivers/net/mlx4/main.c
32019 --- linux-2.6.32.41/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32020 +++ linux-2.6.32.41/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32021 @@ -38,6 +38,7 @@
32022 #include <linux/errno.h>
32023 #include <linux/pci.h>
32024 #include <linux/dma-mapping.h>
32025 +#include <linux/sched.h>
32026
32027 #include <linux/mlx4/device.h>
32028 #include <linux/mlx4/doorbell.h>
32029 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32030 u64 icm_size;
32031 int err;
32032
32033 + pax_track_stack();
32034 +
32035 err = mlx4_QUERY_FW(dev);
32036 if (err) {
32037 if (err == -EACCES)
32038 diff -urNp linux-2.6.32.41/drivers/net/niu.c linux-2.6.32.41/drivers/net/niu.c
32039 --- linux-2.6.32.41/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32040 +++ linux-2.6.32.41/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32041 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32042 int i, num_irqs, err;
32043 u8 first_ldg;
32044
32045 + pax_track_stack();
32046 +
32047 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32048 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32049 ldg_num_map[i] = first_ldg + i;
32050 diff -urNp linux-2.6.32.41/drivers/net/pcnet32.c linux-2.6.32.41/drivers/net/pcnet32.c
32051 --- linux-2.6.32.41/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32052 +++ linux-2.6.32.41/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32053 @@ -79,7 +79,7 @@ static int cards_found;
32054 /*
32055 * VLB I/O addresses
32056 */
32057 -static unsigned int pcnet32_portlist[] __initdata =
32058 +static unsigned int pcnet32_portlist[] __devinitdata =
32059 { 0x300, 0x320, 0x340, 0x360, 0 };
32060
32061 static int pcnet32_debug = 0;
32062 diff -urNp linux-2.6.32.41/drivers/net/tg3.h linux-2.6.32.41/drivers/net/tg3.h
32063 --- linux-2.6.32.41/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32064 +++ linux-2.6.32.41/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32065 @@ -95,6 +95,7 @@
32066 #define CHIPREV_ID_5750_A0 0x4000
32067 #define CHIPREV_ID_5750_A1 0x4001
32068 #define CHIPREV_ID_5750_A3 0x4003
32069 +#define CHIPREV_ID_5750_C1 0x4201
32070 #define CHIPREV_ID_5750_C2 0x4202
32071 #define CHIPREV_ID_5752_A0_HW 0x5000
32072 #define CHIPREV_ID_5752_A0 0x6000
32073 diff -urNp linux-2.6.32.41/drivers/net/tulip/de2104x.c linux-2.6.32.41/drivers/net/tulip/de2104x.c
32074 --- linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32075 +++ linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32076 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32077 struct de_srom_info_leaf *il;
32078 void *bufp;
32079
32080 + pax_track_stack();
32081 +
32082 /* download entire eeprom */
32083 for (i = 0; i < DE_EEPROM_WORDS; i++)
32084 ((__le16 *)ee_data)[i] =
32085 diff -urNp linux-2.6.32.41/drivers/net/tulip/de4x5.c linux-2.6.32.41/drivers/net/tulip/de4x5.c
32086 --- linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32087 +++ linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32088 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32089 for (i=0; i<ETH_ALEN; i++) {
32090 tmp.addr[i] = dev->dev_addr[i];
32091 }
32092 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32093 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32094 break;
32095
32096 case DE4X5_SET_HWADDR: /* Set the hardware address */
32097 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32098 spin_lock_irqsave(&lp->lock, flags);
32099 memcpy(&statbuf, &lp->pktStats, ioc->len);
32100 spin_unlock_irqrestore(&lp->lock, flags);
32101 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32102 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32103 return -EFAULT;
32104 break;
32105 }
32106 diff -urNp linux-2.6.32.41/drivers/net/usb/hso.c linux-2.6.32.41/drivers/net/usb/hso.c
32107 --- linux-2.6.32.41/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32108 +++ linux-2.6.32.41/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32109 @@ -71,7 +71,7 @@
32110 #include <asm/byteorder.h>
32111 #include <linux/serial_core.h>
32112 #include <linux/serial.h>
32113 -
32114 +#include <asm/local.h>
32115
32116 #define DRIVER_VERSION "1.2"
32117 #define MOD_AUTHOR "Option Wireless"
32118 @@ -258,7 +258,7 @@ struct hso_serial {
32119
32120 /* from usb_serial_port */
32121 struct tty_struct *tty;
32122 - int open_count;
32123 + local_t open_count;
32124 spinlock_t serial_lock;
32125
32126 int (*write_data) (struct hso_serial *serial);
32127 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32128 struct urb *urb;
32129
32130 urb = serial->rx_urb[0];
32131 - if (serial->open_count > 0) {
32132 + if (local_read(&serial->open_count) > 0) {
32133 count = put_rxbuf_data(urb, serial);
32134 if (count == -1)
32135 return;
32136 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32137 DUMP1(urb->transfer_buffer, urb->actual_length);
32138
32139 /* Anyone listening? */
32140 - if (serial->open_count == 0)
32141 + if (local_read(&serial->open_count) == 0)
32142 return;
32143
32144 if (status == 0) {
32145 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32146 spin_unlock_irq(&serial->serial_lock);
32147
32148 /* check for port already opened, if not set the termios */
32149 - serial->open_count++;
32150 - if (serial->open_count == 1) {
32151 + if (local_inc_return(&serial->open_count) == 1) {
32152 tty->low_latency = 1;
32153 serial->rx_state = RX_IDLE;
32154 /* Force default termio settings */
32155 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32156 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32157 if (result) {
32158 hso_stop_serial_device(serial->parent);
32159 - serial->open_count--;
32160 + local_dec(&serial->open_count);
32161 kref_put(&serial->parent->ref, hso_serial_ref_free);
32162 }
32163 } else {
32164 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32165
32166 /* reset the rts and dtr */
32167 /* do the actual close */
32168 - serial->open_count--;
32169 + local_dec(&serial->open_count);
32170
32171 - if (serial->open_count <= 0) {
32172 - serial->open_count = 0;
32173 + if (local_read(&serial->open_count) <= 0) {
32174 + local_set(&serial->open_count, 0);
32175 spin_lock_irq(&serial->serial_lock);
32176 if (serial->tty == tty) {
32177 serial->tty->driver_data = NULL;
32178 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32179
32180 /* the actual setup */
32181 spin_lock_irqsave(&serial->serial_lock, flags);
32182 - if (serial->open_count)
32183 + if (local_read(&serial->open_count))
32184 _hso_serial_set_termios(tty, old);
32185 else
32186 tty->termios = old;
32187 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32188 /* Start all serial ports */
32189 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32190 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32191 - if (dev2ser(serial_table[i])->open_count) {
32192 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32193 result =
32194 hso_start_serial_device(serial_table[i], GFP_NOIO);
32195 hso_kick_transmit(dev2ser(serial_table[i]));
32196 diff -urNp linux-2.6.32.41/drivers/net/vxge/vxge-main.c linux-2.6.32.41/drivers/net/vxge/vxge-main.c
32197 --- linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32198 +++ linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32199 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32200 struct sk_buff *completed[NR_SKB_COMPLETED];
32201 int more;
32202
32203 + pax_track_stack();
32204 +
32205 do {
32206 more = 0;
32207 skb_ptr = completed;
32208 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32209 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32210 int index;
32211
32212 + pax_track_stack();
32213 +
32214 /*
32215 * Filling
32216 * - itable with bucket numbers
32217 diff -urNp linux-2.6.32.41/drivers/net/wan/cycx_x25.c linux-2.6.32.41/drivers/net/wan/cycx_x25.c
32218 --- linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32219 +++ linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32220 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32221 unsigned char hex[1024],
32222 * phex = hex;
32223
32224 + pax_track_stack();
32225 +
32226 if (len >= (sizeof(hex) / 2))
32227 len = (sizeof(hex) / 2) - 1;
32228
32229 diff -urNp linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c
32230 --- linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32231 +++ linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32232 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32233 int do_autopm = 1;
32234 DECLARE_COMPLETION_ONSTACK(notif_completion);
32235
32236 + pax_track_stack();
32237 +
32238 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32239 i2400m, ack, ack_size);
32240 BUG_ON(_ack == i2400m->bm_ack_buf);
32241 diff -urNp linux-2.6.32.41/drivers/net/wireless/airo.c linux-2.6.32.41/drivers/net/wireless/airo.c
32242 --- linux-2.6.32.41/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32243 +++ linux-2.6.32.41/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32244 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32245 BSSListElement * loop_net;
32246 BSSListElement * tmp_net;
32247
32248 + pax_track_stack();
32249 +
32250 /* Blow away current list of scan results */
32251 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32252 list_move_tail (&loop_net->list, &ai->network_free_list);
32253 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32254 WepKeyRid wkr;
32255 int rc;
32256
32257 + pax_track_stack();
32258 +
32259 memset( &mySsid, 0, sizeof( mySsid ) );
32260 kfree (ai->flash);
32261 ai->flash = NULL;
32262 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32263 __le32 *vals = stats.vals;
32264 int len;
32265
32266 + pax_track_stack();
32267 +
32268 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32269 return -ENOMEM;
32270 data = (struct proc_data *)file->private_data;
32271 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32272 /* If doLoseSync is not 1, we won't do a Lose Sync */
32273 int doLoseSync = -1;
32274
32275 + pax_track_stack();
32276 +
32277 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32278 return -ENOMEM;
32279 data = (struct proc_data *)file->private_data;
32280 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32281 int i;
32282 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32283
32284 + pax_track_stack();
32285 +
32286 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32287 if (!qual)
32288 return -ENOMEM;
32289 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32290 CapabilityRid cap_rid;
32291 __le32 *vals = stats_rid.vals;
32292
32293 + pax_track_stack();
32294 +
32295 /* Get stats out of the card */
32296 clear_bit(JOB_WSTATS, &local->jobs);
32297 if (local->power.event) {
32298 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c
32299 --- linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32300 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32301 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32302 unsigned int v;
32303 u64 tsf;
32304
32305 + pax_track_stack();
32306 +
32307 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32308 len += snprintf(buf+len, sizeof(buf)-len,
32309 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32310 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32311 unsigned int len = 0;
32312 unsigned int i;
32313
32314 + pax_track_stack();
32315 +
32316 len += snprintf(buf+len, sizeof(buf)-len,
32317 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32318
32319 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c
32320 --- linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32321 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32322 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32323 char buf[512];
32324 unsigned int len = 0;
32325
32326 + pax_track_stack();
32327 +
32328 len += snprintf(buf + len, sizeof(buf) - len,
32329 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32330 len += snprintf(buf + len, sizeof(buf) - len,
32331 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32332 int i;
32333 u8 addr[ETH_ALEN];
32334
32335 + pax_track_stack();
32336 +
32337 len += snprintf(buf + len, sizeof(buf) - len,
32338 "primary: %s (%s chan=%d ht=%d)\n",
32339 wiphy_name(sc->pri_wiphy->hw->wiphy),
32340 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c
32341 --- linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32342 +++ linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32343 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32344 struct b43_debugfs_fops {
32345 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32346 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32347 - struct file_operations fops;
32348 + const struct file_operations fops;
32349 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32350 size_t file_struct_offset;
32351 };
32352 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c
32353 --- linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32354 +++ linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32355 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32356 struct b43legacy_debugfs_fops {
32357 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32358 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32359 - struct file_operations fops;
32360 + const struct file_operations fops;
32361 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32362 size_t file_struct_offset;
32363 /* Take wl->irq_lock before calling read/write? */
32364 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c
32365 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32366 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32367 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32368 int err;
32369 DECLARE_SSID_BUF(ssid);
32370
32371 + pax_track_stack();
32372 +
32373 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32374
32375 if (ssid_len)
32376 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32377 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32378 int err;
32379
32380 + pax_track_stack();
32381 +
32382 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32383 idx, keylen, len);
32384
32385 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c
32386 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32387 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32388 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32389 unsigned long flags;
32390 DECLARE_SSID_BUF(ssid);
32391
32392 + pax_track_stack();
32393 +
32394 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32395 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32396 print_ssid(ssid, info_element->data, info_element->len),
32397 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c
32398 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32399 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32400 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32401 },
32402 };
32403
32404 -static struct iwl_ops iwl1000_ops = {
32405 +static const struct iwl_ops iwl1000_ops = {
32406 .ucode = &iwl5000_ucode,
32407 .lib = &iwl1000_lib,
32408 .hcmd = &iwl5000_hcmd,
32409 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c
32410 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32411 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32412 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32413 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32414 };
32415
32416 -static struct iwl_ops iwl3945_ops = {
32417 +static const struct iwl_ops iwl3945_ops = {
32418 .ucode = &iwl3945_ucode,
32419 .lib = &iwl3945_lib,
32420 .hcmd = &iwl3945_hcmd,
32421 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c
32422 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32423 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32424 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32425 },
32426 };
32427
32428 -static struct iwl_ops iwl4965_ops = {
32429 +static const struct iwl_ops iwl4965_ops = {
32430 .ucode = &iwl4965_ucode,
32431 .lib = &iwl4965_lib,
32432 .hcmd = &iwl4965_hcmd,
32433 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c
32434 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:01.000000000 -0400
32435 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:32.000000000 -0400
32436 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32437 },
32438 };
32439
32440 -struct iwl_ops iwl5000_ops = {
32441 +const struct iwl_ops iwl5000_ops = {
32442 .ucode = &iwl5000_ucode,
32443 .lib = &iwl5000_lib,
32444 .hcmd = &iwl5000_hcmd,
32445 .utils = &iwl5000_hcmd_utils,
32446 };
32447
32448 -static struct iwl_ops iwl5150_ops = {
32449 +static const struct iwl_ops iwl5150_ops = {
32450 .ucode = &iwl5000_ucode,
32451 .lib = &iwl5150_lib,
32452 .hcmd = &iwl5000_hcmd,
32453 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c
32454 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32455 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32456 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32457 .calc_rssi = iwl5000_calc_rssi,
32458 };
32459
32460 -static struct iwl_ops iwl6000_ops = {
32461 +static const struct iwl_ops iwl6000_ops = {
32462 .ucode = &iwl5000_ucode,
32463 .lib = &iwl6000_lib,
32464 .hcmd = &iwl5000_hcmd,
32465 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32466 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32467 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32468 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32469 u8 active_index = 0;
32470 s32 tpt = 0;
32471
32472 + pax_track_stack();
32473 +
32474 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32475
32476 if (!ieee80211_is_data(hdr->frame_control) ||
32477 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32478 u8 valid_tx_ant = 0;
32479 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32480
32481 + pax_track_stack();
32482 +
32483 /* Override starting rate (index 0) if needed for debug purposes */
32484 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32485
32486 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32487 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32488 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32489 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32490 int pos = 0;
32491 const size_t bufsz = sizeof(buf);
32492
32493 + pax_track_stack();
32494 +
32495 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32496 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32497 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32498 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32499 const size_t bufsz = sizeof(buf);
32500 ssize_t ret;
32501
32502 + pax_track_stack();
32503 +
32504 for (i = 0; i < AC_NUM; i++) {
32505 pos += scnprintf(buf + pos, bufsz - pos,
32506 "\tcw_min\tcw_max\taifsn\ttxop\n");
32507 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h
32508 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32509 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32510 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32511 #endif
32512
32513 #else
32514 -#define IWL_DEBUG(__priv, level, fmt, args...)
32515 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32516 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32517 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32518 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32519 void *p, u32 len)
32520 {}
32521 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h
32522 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32523 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32524 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32525
32526 /* shared structures from iwl-5000.c */
32527 extern struct iwl_mod_params iwl50_mod_params;
32528 -extern struct iwl_ops iwl5000_ops;
32529 +extern const struct iwl_ops iwl5000_ops;
32530 extern struct iwl_ucode_ops iwl5000_ucode;
32531 extern struct iwl_lib_ops iwl5000_lib;
32532 extern struct iwl_hcmd_ops iwl5000_hcmd;
32533 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c
32534 --- linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32535 +++ linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32536 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32537 int buf_len = 512;
32538 size_t len = 0;
32539
32540 + pax_track_stack();
32541 +
32542 if (*ppos != 0)
32543 return 0;
32544 if (count < sizeof(buf))
32545 diff -urNp linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c
32546 --- linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32547 +++ linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32548 @@ -708,7 +708,7 @@ out_unlock:
32549 struct lbs_debugfs_files {
32550 const char *name;
32551 int perm;
32552 - struct file_operations fops;
32553 + const struct file_operations fops;
32554 };
32555
32556 static const struct lbs_debugfs_files debugfs_files[] = {
32557 diff -urNp linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c
32558 --- linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32559 +++ linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32560 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32561
32562 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32563
32564 - if (rts_threshold < 0 || rts_threshold > 2347)
32565 + if (rts_threshold > 2347)
32566 rts_threshold = 2347;
32567
32568 tmp = cpu_to_le32(rts_threshold);
32569 diff -urNp linux-2.6.32.41/drivers/oprofile/buffer_sync.c linux-2.6.32.41/drivers/oprofile/buffer_sync.c
32570 --- linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32571 +++ linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32572 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32573 if (cookie == NO_COOKIE)
32574 offset = pc;
32575 if (cookie == INVALID_COOKIE) {
32576 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32577 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32578 offset = pc;
32579 }
32580 if (cookie != last_cookie) {
32581 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32582 /* add userspace sample */
32583
32584 if (!mm) {
32585 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32586 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32587 return 0;
32588 }
32589
32590 cookie = lookup_dcookie(mm, s->eip, &offset);
32591
32592 if (cookie == INVALID_COOKIE) {
32593 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32594 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32595 return 0;
32596 }
32597
32598 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32599 /* ignore backtraces if failed to add a sample */
32600 if (state == sb_bt_start) {
32601 state = sb_bt_ignore;
32602 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32603 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32604 }
32605 }
32606 release_mm(mm);
32607 diff -urNp linux-2.6.32.41/drivers/oprofile/event_buffer.c linux-2.6.32.41/drivers/oprofile/event_buffer.c
32608 --- linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32609 +++ linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32610 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32611 }
32612
32613 if (buffer_pos == buffer_size) {
32614 - atomic_inc(&oprofile_stats.event_lost_overflow);
32615 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32616 return;
32617 }
32618
32619 diff -urNp linux-2.6.32.41/drivers/oprofile/oprof.c linux-2.6.32.41/drivers/oprofile/oprof.c
32620 --- linux-2.6.32.41/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32621 +++ linux-2.6.32.41/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32622 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32623 if (oprofile_ops.switch_events())
32624 return;
32625
32626 - atomic_inc(&oprofile_stats.multiplex_counter);
32627 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32628 start_switch_worker();
32629 }
32630
32631 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofilefs.c linux-2.6.32.41/drivers/oprofile/oprofilefs.c
32632 --- linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32633 +++ linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32634 @@ -187,7 +187,7 @@ static const struct file_operations atom
32635
32636
32637 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32638 - char const *name, atomic_t *val)
32639 + char const *name, atomic_unchecked_t *val)
32640 {
32641 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32642 &atomic_ro_fops, 0444);
32643 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.c linux-2.6.32.41/drivers/oprofile/oprofile_stats.c
32644 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32645 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32646 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32647 cpu_buf->sample_invalid_eip = 0;
32648 }
32649
32650 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32651 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32652 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32653 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32654 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32655 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32656 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32657 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32658 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32659 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32660 }
32661
32662
32663 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.h linux-2.6.32.41/drivers/oprofile/oprofile_stats.h
32664 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32665 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32666 @@ -13,11 +13,11 @@
32667 #include <asm/atomic.h>
32668
32669 struct oprofile_stat_struct {
32670 - atomic_t sample_lost_no_mm;
32671 - atomic_t sample_lost_no_mapping;
32672 - atomic_t bt_lost_no_mapping;
32673 - atomic_t event_lost_overflow;
32674 - atomic_t multiplex_counter;
32675 + atomic_unchecked_t sample_lost_no_mm;
32676 + atomic_unchecked_t sample_lost_no_mapping;
32677 + atomic_unchecked_t bt_lost_no_mapping;
32678 + atomic_unchecked_t event_lost_overflow;
32679 + atomic_unchecked_t multiplex_counter;
32680 };
32681
32682 extern struct oprofile_stat_struct oprofile_stats;
32683 diff -urNp linux-2.6.32.41/drivers/parisc/pdc_stable.c linux-2.6.32.41/drivers/parisc/pdc_stable.c
32684 --- linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32685 +++ linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32686 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32687 return ret;
32688 }
32689
32690 -static struct sysfs_ops pdcspath_attr_ops = {
32691 +static const struct sysfs_ops pdcspath_attr_ops = {
32692 .show = pdcspath_attr_show,
32693 .store = pdcspath_attr_store,
32694 };
32695 diff -urNp linux-2.6.32.41/drivers/parport/procfs.c linux-2.6.32.41/drivers/parport/procfs.c
32696 --- linux-2.6.32.41/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32697 +++ linux-2.6.32.41/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32698 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32699
32700 *ppos += len;
32701
32702 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32703 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32704 }
32705
32706 #ifdef CONFIG_PARPORT_1284
32707 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32708
32709 *ppos += len;
32710
32711 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32712 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32713 }
32714 #endif /* IEEE1284.3 support. */
32715
32716 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c
32717 --- linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32718 +++ linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32719 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32720 }
32721
32722
32723 -static struct acpi_dock_ops acpiphp_dock_ops = {
32724 +static const struct acpi_dock_ops acpiphp_dock_ops = {
32725 .handler = handle_hotplug_event_func,
32726 };
32727
32728 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c
32729 --- linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32730 +++ linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32731 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32732
32733 void compaq_nvram_init (void __iomem *rom_start)
32734 {
32735 +
32736 +#ifndef CONFIG_PAX_KERNEXEC
32737 if (rom_start) {
32738 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32739 }
32740 +#endif
32741 +
32742 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32743
32744 /* initialize our int15 lock */
32745 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/fakephp.c linux-2.6.32.41/drivers/pci/hotplug/fakephp.c
32746 --- linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32747 +++ linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32748 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32749 }
32750
32751 static struct kobj_type legacy_ktype = {
32752 - .sysfs_ops = &(struct sysfs_ops){
32753 + .sysfs_ops = &(const struct sysfs_ops){
32754 .store = legacy_store, .show = legacy_show
32755 },
32756 .release = &legacy_release,
32757 diff -urNp linux-2.6.32.41/drivers/pci/intel-iommu.c linux-2.6.32.41/drivers/pci/intel-iommu.c
32758 --- linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32759 +++ linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32760 @@ -2643,7 +2643,7 @@ error:
32761 return 0;
32762 }
32763
32764 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32765 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
32766 unsigned long offset, size_t size,
32767 enum dma_data_direction dir,
32768 struct dma_attrs *attrs)
32769 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32770 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32771 }
32772
32773 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32774 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32775 size_t size, enum dma_data_direction dir,
32776 struct dma_attrs *attrs)
32777 {
32778 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32779 }
32780 }
32781
32782 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32783 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
32784 dma_addr_t *dma_handle, gfp_t flags)
32785 {
32786 void *vaddr;
32787 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32788 return NULL;
32789 }
32790
32791 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32792 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32793 dma_addr_t dma_handle)
32794 {
32795 int order;
32796 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32797 free_pages((unsigned long)vaddr, order);
32798 }
32799
32800 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32801 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32802 int nelems, enum dma_data_direction dir,
32803 struct dma_attrs *attrs)
32804 {
32805 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32806 return nelems;
32807 }
32808
32809 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32810 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32811 enum dma_data_direction dir, struct dma_attrs *attrs)
32812 {
32813 int i;
32814 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32815 return nelems;
32816 }
32817
32818 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32819 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32820 {
32821 return !dma_addr;
32822 }
32823
32824 -struct dma_map_ops intel_dma_ops = {
32825 +const struct dma_map_ops intel_dma_ops = {
32826 .alloc_coherent = intel_alloc_coherent,
32827 .free_coherent = intel_free_coherent,
32828 .map_sg = intel_map_sg,
32829 diff -urNp linux-2.6.32.41/drivers/pci/pcie/aspm.c linux-2.6.32.41/drivers/pci/pcie/aspm.c
32830 --- linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32831 +++ linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32832 @@ -27,9 +27,9 @@
32833 #define MODULE_PARAM_PREFIX "pcie_aspm."
32834
32835 /* Note: those are not register definitions */
32836 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32837 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32838 -#define ASPM_STATE_L1 (4) /* L1 state */
32839 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32840 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32841 +#define ASPM_STATE_L1 (4U) /* L1 state */
32842 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32843 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32844
32845 diff -urNp linux-2.6.32.41/drivers/pci/probe.c linux-2.6.32.41/drivers/pci/probe.c
32846 --- linux-2.6.32.41/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
32847 +++ linux-2.6.32.41/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
32848 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
32849 return ret;
32850 }
32851
32852 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
32853 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
32854 struct device_attribute *attr,
32855 char *buf)
32856 {
32857 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
32858 }
32859
32860 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
32861 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
32862 struct device_attribute *attr,
32863 char *buf)
32864 {
32865 diff -urNp linux-2.6.32.41/drivers/pci/proc.c linux-2.6.32.41/drivers/pci/proc.c
32866 --- linux-2.6.32.41/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
32867 +++ linux-2.6.32.41/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
32868 @@ -480,7 +480,16 @@ static const struct file_operations proc
32869 static int __init pci_proc_init(void)
32870 {
32871 struct pci_dev *dev = NULL;
32872 +
32873 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32874 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32875 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32876 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32877 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32878 +#endif
32879 +#else
32880 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32881 +#endif
32882 proc_create("devices", 0, proc_bus_pci_dir,
32883 &proc_bus_pci_dev_operations);
32884 proc_initialized = 1;
32885 diff -urNp linux-2.6.32.41/drivers/pci/slot.c linux-2.6.32.41/drivers/pci/slot.c
32886 --- linux-2.6.32.41/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
32887 +++ linux-2.6.32.41/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
32888 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
32889 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
32890 }
32891
32892 -static struct sysfs_ops pci_slot_sysfs_ops = {
32893 +static const struct sysfs_ops pci_slot_sysfs_ops = {
32894 .show = pci_slot_attr_show,
32895 .store = pci_slot_attr_store,
32896 };
32897 diff -urNp linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c
32898 --- linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
32899 +++ linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
32900 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
32901 return -EFAULT;
32902 }
32903 }
32904 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32905 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32906 if (!buf)
32907 return -ENOMEM;
32908
32909 diff -urNp linux-2.6.32.41/drivers/platform/x86/acer-wmi.c linux-2.6.32.41/drivers/platform/x86/acer-wmi.c
32910 --- linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
32911 +++ linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
32912 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
32913 return 0;
32914 }
32915
32916 -static struct backlight_ops acer_bl_ops = {
32917 +static const struct backlight_ops acer_bl_ops = {
32918 .get_brightness = read_brightness,
32919 .update_status = update_bl_status,
32920 };
32921 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus_acpi.c linux-2.6.32.41/drivers/platform/x86/asus_acpi.c
32922 --- linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
32923 +++ linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
32924 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
32925 return 0;
32926 }
32927
32928 -static struct backlight_ops asus_backlight_data = {
32929 +static const struct backlight_ops asus_backlight_data = {
32930 .get_brightness = read_brightness,
32931 .update_status = set_brightness_status,
32932 };
32933 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus-laptop.c linux-2.6.32.41/drivers/platform/x86/asus-laptop.c
32934 --- linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
32935 +++ linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
32936 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
32937 */
32938 static int read_brightness(struct backlight_device *bd);
32939 static int update_bl_status(struct backlight_device *bd);
32940 -static struct backlight_ops asusbl_ops = {
32941 +static const struct backlight_ops asusbl_ops = {
32942 .get_brightness = read_brightness,
32943 .update_status = update_bl_status,
32944 };
32945 diff -urNp linux-2.6.32.41/drivers/platform/x86/compal-laptop.c linux-2.6.32.41/drivers/platform/x86/compal-laptop.c
32946 --- linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
32947 +++ linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
32948 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
32949 return set_lcd_level(b->props.brightness);
32950 }
32951
32952 -static struct backlight_ops compalbl_ops = {
32953 +static const struct backlight_ops compalbl_ops = {
32954 .get_brightness = bl_get_brightness,
32955 .update_status = bl_update_status,
32956 };
32957 diff -urNp linux-2.6.32.41/drivers/platform/x86/dell-laptop.c linux-2.6.32.41/drivers/platform/x86/dell-laptop.c
32958 --- linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
32959 +++ linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
32960 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
32961 return buffer.output[1];
32962 }
32963
32964 -static struct backlight_ops dell_ops = {
32965 +static const struct backlight_ops dell_ops = {
32966 .get_brightness = dell_get_intensity,
32967 .update_status = dell_send_intensity,
32968 };
32969 diff -urNp linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c
32970 --- linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
32971 +++ linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
32972 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
32973 */
32974 static int read_brightness(struct backlight_device *bd);
32975 static int update_bl_status(struct backlight_device *bd);
32976 -static struct backlight_ops eeepcbl_ops = {
32977 +static const struct backlight_ops eeepcbl_ops = {
32978 .get_brightness = read_brightness,
32979 .update_status = update_bl_status,
32980 };
32981 diff -urNp linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c
32982 --- linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
32983 +++ linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
32984 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
32985 return ret;
32986 }
32987
32988 -static struct backlight_ops fujitsubl_ops = {
32989 +static const struct backlight_ops fujitsubl_ops = {
32990 .get_brightness = bl_get_brightness,
32991 .update_status = bl_update_status,
32992 };
32993 diff -urNp linux-2.6.32.41/drivers/platform/x86/msi-laptop.c linux-2.6.32.41/drivers/platform/x86/msi-laptop.c
32994 --- linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
32995 +++ linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
32996 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
32997 return set_lcd_level(b->props.brightness);
32998 }
32999
33000 -static struct backlight_ops msibl_ops = {
33001 +static const struct backlight_ops msibl_ops = {
33002 .get_brightness = bl_get_brightness,
33003 .update_status = bl_update_status,
33004 };
33005 diff -urNp linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c
33006 --- linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33007 +++ linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33008 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33009 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33010 }
33011
33012 -static struct backlight_ops pcc_backlight_ops = {
33013 +static const struct backlight_ops pcc_backlight_ops = {
33014 .get_brightness = bl_get,
33015 .update_status = bl_set_status,
33016 };
33017 diff -urNp linux-2.6.32.41/drivers/platform/x86/sony-laptop.c linux-2.6.32.41/drivers/platform/x86/sony-laptop.c
33018 --- linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33019 +++ linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33020 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33021 }
33022
33023 static struct backlight_device *sony_backlight_device;
33024 -static struct backlight_ops sony_backlight_ops = {
33025 +static const struct backlight_ops sony_backlight_ops = {
33026 .update_status = sony_backlight_update_status,
33027 .get_brightness = sony_backlight_get_brightness,
33028 };
33029 diff -urNp linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c
33030 --- linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33031 +++ linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33032 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33033 BACKLIGHT_UPDATE_HOTKEY);
33034 }
33035
33036 -static struct backlight_ops ibm_backlight_data = {
33037 +static const struct backlight_ops ibm_backlight_data = {
33038 .get_brightness = brightness_get,
33039 .update_status = brightness_update_status,
33040 };
33041 diff -urNp linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c
33042 --- linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33043 +++ linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33044 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33045 return AE_OK;
33046 }
33047
33048 -static struct backlight_ops toshiba_backlight_data = {
33049 +static const struct backlight_ops toshiba_backlight_data = {
33050 .get_brightness = get_lcd,
33051 .update_status = set_lcd_status,
33052 };
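/*
 * Illustrative sketch, not part of the patch: every backlight_ops table
 * touched above holds only function pointers fixed at compile time, so the
 * patch marks them const. The structures then live in .rodata, where an
 * attempted runtime overwrite (for example through a corrupted pointer)
 * faults instead of silently redirecting the driver's callbacks. The
 * pattern in isolation, with hypothetical example handlers:
 */
#include <linux/backlight.h>

static int example_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static int example_update_status(struct backlight_device *bd)
{
	return 0;	/* push bd->props.brightness to the hardware here */
}

static const struct backlight_ops example_bl_ops = {
	.get_brightness	= example_get_brightness,
	.update_status	= example_update_status,
};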
33053 diff -urNp linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c
33054 --- linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33055 +++ linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33056 @@ -60,7 +60,7 @@ do { \
33057 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33058 } while(0)
33059
33060 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33061 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33062 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33063
33064 /*
33065 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33066
33067 cpu = get_cpu();
33068 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33069 +
33070 + pax_open_kernel();
33071 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33072 + pax_close_kernel();
33073
33074 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33075 spin_lock_irqsave(&pnp_bios_lock, flags);
33076 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33077 :"memory");
33078 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33079
33080 + pax_open_kernel();
33081 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33082 + pax_close_kernel();
33083 +
33084 put_cpu();
33085
33086 /* If we get here and this is set then the PnP BIOS faulted on us. */
33087 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33088 return status;
33089 }
33090
33091 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33092 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33093 {
33094 int i;
33095
33096 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33097 pnp_bios_callpoint.offset = header->fields.pm16offset;
33098 pnp_bios_callpoint.segment = PNP_CS16;
33099
33100 + pax_open_kernel();
33101 +
33102 for_each_possible_cpu(i) {
33103 struct desc_struct *gdt = get_cpu_gdt_table(i);
33104 if (!gdt)
33105 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33106 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33107 (unsigned long)__va(header->fields.pm16dseg));
33108 }
33109 +
33110 + pax_close_kernel();
33111 }
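/*
 * Illustrative sketch, not part of the patch: under PAX_KERNEXEC the GDT is
 * normally read-only, so the transient descriptor swaps above are bracketed
 * with pax_open_kernel()/pax_close_kernel(), which briefly lift the write
 * protection on the current CPU. The access byte of bad_bios_desc also gains
 * the accessed bit (0x4092 -> 0x4093), which spares the CPU from writing that
 * bit back into the protected table when the segment is first loaded. Usage
 * pattern, assuming the PaX primitives defined elsewhere in this patch:
 */
#include <asm/desc.h>

static void example_swap_desc(int cpu, struct desc_struct desc)
{
	pax_open_kernel();
	get_cpu_gdt_table(cpu)[0x40 / 8] = desc;	/* same slot the PnP BIOS code borrows */
	pax_close_kernel();
}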
33112 diff -urNp linux-2.6.32.41/drivers/pnp/resource.c linux-2.6.32.41/drivers/pnp/resource.c
33113 --- linux-2.6.32.41/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33114 +++ linux-2.6.32.41/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33115 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33116 return 1;
33117
33118 /* check if the resource is valid */
33119 - if (*irq < 0 || *irq > 15)
33120 + if (*irq > 15)
33121 return 0;
33122
33123 /* check if the resource is reserved */
33124 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33125 return 1;
33126
33127 /* check if the resource is valid */
33128 - if (*dma < 0 || *dma == 4 || *dma > 7)
33129 + if (*dma == 4 || *dma > 7)
33130 return 0;
33131
33132 /* check if the resource is reserved */
33133 diff -urNp linux-2.6.32.41/drivers/rtc/rtc-dev.c linux-2.6.32.41/drivers/rtc/rtc-dev.c
33134 --- linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33135 +++ linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33136 @@ -14,6 +14,7 @@
33137 #include <linux/module.h>
33138 #include <linux/rtc.h>
33139 #include <linux/sched.h>
33140 +#include <linux/grsecurity.h>
33141 #include "rtc-core.h"
33142
33143 static dev_t rtc_devt;
33144 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33145 if (copy_from_user(&tm, uarg, sizeof(tm)))
33146 return -EFAULT;
33147
33148 + gr_log_timechange();
33149 +
33150 return rtc_set_time(rtc, &tm);
33151
33152 case RTC_PIE_ON:
33153 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.c linux-2.6.32.41/drivers/s390/cio/qdio_perf.c
33154 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33155 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33156 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33157 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33158 {
33159 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33160 - (long)atomic_long_read(&perf_stats.qdio_int));
33161 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33162 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33163 - (long)atomic_long_read(&perf_stats.pci_int));
33164 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33165 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33166 - (long)atomic_long_read(&perf_stats.thin_int));
33167 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33168 seq_printf(m, "\n");
33169 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33170 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33171 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33172 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33173 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33174 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33175 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33176 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33177 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33178 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33179 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33180 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33181 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33182 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33183 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33184 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33185 seq_printf(m, "\n");
33186 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33187 - (long)atomic_long_read(&perf_stats.siga_in));
33188 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33189 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33190 - (long)atomic_long_read(&perf_stats.siga_out));
33191 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33192 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33193 - (long)atomic_long_read(&perf_stats.siga_sync));
33194 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33195 seq_printf(m, "\n");
33196 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33197 - (long)atomic_long_read(&perf_stats.inbound_handler));
33198 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33199 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33200 - (long)atomic_long_read(&perf_stats.outbound_handler));
33201 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33202 seq_printf(m, "\n");
33203 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33204 - (long)atomic_long_read(&perf_stats.fast_requeue));
33205 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33206 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33207 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33208 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33209 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33210 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33211 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33212 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33213 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33214 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33215 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33216 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33217 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33218 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33219 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33220 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33221 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33222 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33223 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33224 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33225 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33226 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33227 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33228 seq_printf(m, "\n");
33229 return 0;
33230 }
33231 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.h linux-2.6.32.41/drivers/s390/cio/qdio_perf.h
33232 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33233 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33234 @@ -13,46 +13,46 @@
33235
33236 struct qdio_perf_stats {
33237 /* interrupt handler calls */
33238 - atomic_long_t qdio_int;
33239 - atomic_long_t pci_int;
33240 - atomic_long_t thin_int;
33241 + atomic_long_unchecked_t qdio_int;
33242 + atomic_long_unchecked_t pci_int;
33243 + atomic_long_unchecked_t thin_int;
33244
33245 /* tasklet runs */
33246 - atomic_long_t tasklet_inbound;
33247 - atomic_long_t tasklet_outbound;
33248 - atomic_long_t tasklet_thinint;
33249 - atomic_long_t tasklet_thinint_loop;
33250 - atomic_long_t thinint_inbound;
33251 - atomic_long_t thinint_inbound_loop;
33252 - atomic_long_t thinint_inbound_loop2;
33253 + atomic_long_unchecked_t tasklet_inbound;
33254 + atomic_long_unchecked_t tasklet_outbound;
33255 + atomic_long_unchecked_t tasklet_thinint;
33256 + atomic_long_unchecked_t tasklet_thinint_loop;
33257 + atomic_long_unchecked_t thinint_inbound;
33258 + atomic_long_unchecked_t thinint_inbound_loop;
33259 + atomic_long_unchecked_t thinint_inbound_loop2;
33260
33261 /* signal adapter calls */
33262 - atomic_long_t siga_out;
33263 - atomic_long_t siga_in;
33264 - atomic_long_t siga_sync;
33265 + atomic_long_unchecked_t siga_out;
33266 + atomic_long_unchecked_t siga_in;
33267 + atomic_long_unchecked_t siga_sync;
33268
33269 /* misc */
33270 - atomic_long_t inbound_handler;
33271 - atomic_long_t outbound_handler;
33272 - atomic_long_t fast_requeue;
33273 - atomic_long_t outbound_target_full;
33274 + atomic_long_unchecked_t inbound_handler;
33275 + atomic_long_unchecked_t outbound_handler;
33276 + atomic_long_unchecked_t fast_requeue;
33277 + atomic_long_unchecked_t outbound_target_full;
33278
33279 /* for debugging */
33280 - atomic_long_t debug_tl_out_timer;
33281 - atomic_long_t debug_stop_polling;
33282 - atomic_long_t debug_eqbs_all;
33283 - atomic_long_t debug_eqbs_incomplete;
33284 - atomic_long_t debug_sqbs_all;
33285 - atomic_long_t debug_sqbs_incomplete;
33286 + atomic_long_unchecked_t debug_tl_out_timer;
33287 + atomic_long_unchecked_t debug_stop_polling;
33288 + atomic_long_unchecked_t debug_eqbs_all;
33289 + atomic_long_unchecked_t debug_eqbs_incomplete;
33290 + atomic_long_unchecked_t debug_sqbs_all;
33291 + atomic_long_unchecked_t debug_sqbs_incomplete;
33292 };
33293
33294 extern struct qdio_perf_stats perf_stats;
33295 extern int qdio_performance_stats;
33296
33297 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33298 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33299 {
33300 if (qdio_performance_stats)
33301 - atomic_long_inc(count);
33302 + atomic_long_inc_unchecked(count);
33303 }
33304
33305 int qdio_setup_perf_stats(void);
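/*
 * Illustrative sketch, not part of the patch: PAX_REFCOUNT treats an
 * overflow of atomic_t/atomic_long_t as a reference-count attack, reports
 * it and kills the offending task. Pure statistics counters like the qdio
 * ones above (and the many SCSI counters converted below) are allowed to
 * wrap, so the patch switches them to the *_unchecked variants, which
 * behave identically but are excluded from the overflow check. Usage
 * pattern, assuming the PaX atomic_*_unchecked API defined elsewhere in
 * this patch:
 */
#include <asm/atomic.h>

static atomic_long_unchecked_t example_events;

static inline void example_count_event(void)
{
	atomic_long_inc_unchecked(&example_events);
}

static inline long example_read_events(void)
{
	return atomic_long_read_unchecked(&example_events);
}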
33306 diff -urNp linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c
33307 --- linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33308 +++ linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33309 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33310 u32 actual_fibsize64, actual_fibsize = 0;
33311 int i;
33312
33313 + pax_track_stack();
33314
33315 if (dev->in_reset) {
33316 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
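/*
 * Illustrative note, not part of the patch: pax_track_stack() belongs to
 * PAX_MEMORY_STACKLEAK. It is added at the top of functions with unusually
 * large stack frames so the lowest stack pointer reached is recorded and
 * the used area can be cleared on return to user space, preventing stale
 * stack data from leaking into later syscalls. Shape of the
 * instrumentation, assuming the pax_track_stack() helper defined elsewhere
 * in this patch:
 */
#include <linux/string.h>

static int example_deep_stack_function(void)
{
	char scratch[1024];		/* large on-stack buffer */

	pax_track_stack();		/* record the deep frame for STACKLEAK */

	memset(scratch, 0, sizeof(scratch));
	return scratch[0];
}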
33317 diff -urNp linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c
33318 --- linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33319 +++ linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33320 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33321 flash_error_table[i].reason);
33322 }
33323
33324 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33325 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33326 asd_show_update_bios, asd_store_update_bios);
33327
33328 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
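/*
 * Illustrative note, not part of the patch: S_IWUGO would leave the
 * update_bios sysfs attribute writable by any user; S_IWUSR restricts the
 * write side to root while keeping the file world-readable. Generic
 * pattern with hypothetical example_show/example_store handlers:
 */
#include <linux/device.h>
#include <linux/stat.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return 0;
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;
}

/* readable by everyone, writable by root only */
static DEVICE_ATTR(example_ctl, S_IRUGO | S_IWUSR, example_show, example_store);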
33329 diff -urNp linux-2.6.32.41/drivers/scsi/BusLogic.c linux-2.6.32.41/drivers/scsi/BusLogic.c
33330 --- linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33331 +++ linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33332 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33333 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33334 *PrototypeHostAdapter)
33335 {
33336 + pax_track_stack();
33337 +
33338 /*
33339 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33340 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33341 diff -urNp linux-2.6.32.41/drivers/scsi/dpt_i2o.c linux-2.6.32.41/drivers/scsi/dpt_i2o.c
33342 --- linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33343 +++ linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33344 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33345 dma_addr_t addr;
33346 ulong flags = 0;
33347
33348 + pax_track_stack();
33349 +
33350 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33351 // get user msg size in u32s
33352 if(get_user(size, &user_msg[0])){
33353 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33354 s32 rcode;
33355 dma_addr_t addr;
33356
33357 + pax_track_stack();
33358 +
33359 memset(msg, 0 , sizeof(msg));
33360 len = scsi_bufflen(cmd);
33361 direction = 0x00000000;
33362 diff -urNp linux-2.6.32.41/drivers/scsi/eata.c linux-2.6.32.41/drivers/scsi/eata.c
33363 --- linux-2.6.32.41/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33364 +++ linux-2.6.32.41/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33365 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33366 struct hostdata *ha;
33367 char name[16];
33368
33369 + pax_track_stack();
33370 +
33371 sprintf(name, "%s%d", driver_name, j);
33372
33373 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33374 diff -urNp linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c
33375 --- linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33376 +++ linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33377 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33378 size_t rlen;
33379 size_t dlen;
33380
33381 + pax_track_stack();
33382 +
33383 fiph = (struct fip_header *)skb->data;
33384 sub = fiph->fip_subcode;
33385 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33386 diff -urNp linux-2.6.32.41/drivers/scsi/gdth.c linux-2.6.32.41/drivers/scsi/gdth.c
33387 --- linux-2.6.32.41/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33388 +++ linux-2.6.32.41/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33389 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33390 ulong flags;
33391 gdth_ha_str *ha;
33392
33393 + pax_track_stack();
33394 +
33395 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33396 return -EFAULT;
33397 ha = gdth_find_ha(ldrv.ionode);
33398 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33399 gdth_ha_str *ha;
33400 int rval;
33401
33402 + pax_track_stack();
33403 +
33404 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33405 res.number >= MAX_HDRIVES)
33406 return -EFAULT;
33407 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33408 gdth_ha_str *ha;
33409 int rval;
33410
33411 + pax_track_stack();
33412 +
33413 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33414 return -EFAULT;
33415 ha = gdth_find_ha(gen.ionode);
33416 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33417 int i;
33418 gdth_cmd_str gdtcmd;
33419 char cmnd[MAX_COMMAND_SIZE];
33420 +
33421 + pax_track_stack();
33422 +
33423 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33424
33425 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33426 diff -urNp linux-2.6.32.41/drivers/scsi/gdth_proc.c linux-2.6.32.41/drivers/scsi/gdth_proc.c
33427 --- linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33428 +++ linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33429 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33430 ulong64 paddr;
33431
33432 char cmnd[MAX_COMMAND_SIZE];
33433 +
33434 + pax_track_stack();
33435 +
33436 memset(cmnd, 0xff, 12);
33437 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33438
33439 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33440 gdth_hget_str *phg;
33441 char cmnd[MAX_COMMAND_SIZE];
33442
33443 + pax_track_stack();
33444 +
33445 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33446 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33447 if (!gdtcmd || !estr)
33448 diff -urNp linux-2.6.32.41/drivers/scsi/hosts.c linux-2.6.32.41/drivers/scsi/hosts.c
33449 --- linux-2.6.32.41/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33450 +++ linux-2.6.32.41/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33451 @@ -40,7 +40,7 @@
33452 #include "scsi_logging.h"
33453
33454
33455 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33456 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33457
33458
33459 static void scsi_host_cls_release(struct device *dev)
33460 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33461 * subtract one because we increment first then return, but we need to
33462 * know what the next host number was before increment
33463 */
33464 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33465 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33466 shost->dma_channel = 0xff;
33467
33468 /* These three are default values which can be overridden */
33469 diff -urNp linux-2.6.32.41/drivers/scsi/ipr.c linux-2.6.32.41/drivers/scsi/ipr.c
33470 --- linux-2.6.32.41/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33471 +++ linux-2.6.32.41/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33472 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33473 return true;
33474 }
33475
33476 -static struct ata_port_operations ipr_sata_ops = {
33477 +static const struct ata_port_operations ipr_sata_ops = {
33478 .phy_reset = ipr_ata_phy_reset,
33479 .hardreset = ipr_sata_reset,
33480 .post_internal_cmd = ipr_ata_post_internal,
33481 diff -urNp linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c
33482 --- linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33483 +++ linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33484 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33485 * all together if not used XXX
33486 */
33487 struct {
33488 - atomic_t no_free_exch;
33489 - atomic_t no_free_exch_xid;
33490 - atomic_t xid_not_found;
33491 - atomic_t xid_busy;
33492 - atomic_t seq_not_found;
33493 - atomic_t non_bls_resp;
33494 + atomic_unchecked_t no_free_exch;
33495 + atomic_unchecked_t no_free_exch_xid;
33496 + atomic_unchecked_t xid_not_found;
33497 + atomic_unchecked_t xid_busy;
33498 + atomic_unchecked_t seq_not_found;
33499 + atomic_unchecked_t non_bls_resp;
33500 } stats;
33501 };
33502 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33503 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33504 /* allocate memory for exchange */
33505 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33506 if (!ep) {
33507 - atomic_inc(&mp->stats.no_free_exch);
33508 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33509 goto out;
33510 }
33511 memset(ep, 0, sizeof(*ep));
33512 @@ -557,7 +557,7 @@ out:
33513 return ep;
33514 err:
33515 spin_unlock_bh(&pool->lock);
33516 - atomic_inc(&mp->stats.no_free_exch_xid);
33517 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33518 mempool_free(ep, mp->ep_pool);
33519 return NULL;
33520 }
33521 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33522 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33523 ep = fc_exch_find(mp, xid);
33524 if (!ep) {
33525 - atomic_inc(&mp->stats.xid_not_found);
33526 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33527 reject = FC_RJT_OX_ID;
33528 goto out;
33529 }
33530 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33531 ep = fc_exch_find(mp, xid);
33532 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33533 if (ep) {
33534 - atomic_inc(&mp->stats.xid_busy);
33535 + atomic_inc_unchecked(&mp->stats.xid_busy);
33536 reject = FC_RJT_RX_ID;
33537 goto rel;
33538 }
33539 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33540 }
33541 xid = ep->xid; /* get our XID */
33542 } else if (!ep) {
33543 - atomic_inc(&mp->stats.xid_not_found);
33544 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33545 reject = FC_RJT_RX_ID; /* XID not found */
33546 goto out;
33547 }
33548 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33549 } else {
33550 sp = &ep->seq;
33551 if (sp->id != fh->fh_seq_id) {
33552 - atomic_inc(&mp->stats.seq_not_found);
33553 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33554 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33555 goto rel;
33556 }
33557 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33558
33559 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33560 if (!ep) {
33561 - atomic_inc(&mp->stats.xid_not_found);
33562 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33563 goto out;
33564 }
33565 if (ep->esb_stat & ESB_ST_COMPLETE) {
33566 - atomic_inc(&mp->stats.xid_not_found);
33567 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33568 goto out;
33569 }
33570 if (ep->rxid == FC_XID_UNKNOWN)
33571 ep->rxid = ntohs(fh->fh_rx_id);
33572 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33573 - atomic_inc(&mp->stats.xid_not_found);
33574 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33575 goto rel;
33576 }
33577 if (ep->did != ntoh24(fh->fh_s_id) &&
33578 ep->did != FC_FID_FLOGI) {
33579 - atomic_inc(&mp->stats.xid_not_found);
33580 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33581 goto rel;
33582 }
33583 sof = fr_sof(fp);
33584 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33585 } else {
33586 sp = &ep->seq;
33587 if (sp->id != fh->fh_seq_id) {
33588 - atomic_inc(&mp->stats.seq_not_found);
33589 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33590 goto rel;
33591 }
33592 }
33593 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33594 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33595
33596 if (!sp)
33597 - atomic_inc(&mp->stats.xid_not_found);
33598 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33599 else
33600 - atomic_inc(&mp->stats.non_bls_resp);
33601 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33602
33603 fc_frame_free(fp);
33604 }
33605 diff -urNp linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c
33606 --- linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33607 +++ linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33608 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33609 }
33610 }
33611
33612 -static struct ata_port_operations sas_sata_ops = {
33613 +static const struct ata_port_operations sas_sata_ops = {
33614 .phy_reset = sas_ata_phy_reset,
33615 .post_internal_cmd = sas_ata_post_internal,
33616 .qc_defer = ata_std_qc_defer,
33617 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c
33618 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33619 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33620 @@ -124,7 +124,7 @@ struct lpfc_debug {
33621 int len;
33622 };
33623
33624 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33625 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33626 static unsigned long lpfc_debugfs_start_time = 0L;
33627
33628 /**
33629 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33630 lpfc_debugfs_enable = 0;
33631
33632 len = 0;
33633 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33634 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33635 (lpfc_debugfs_max_disc_trc - 1);
33636 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33637 dtp = vport->disc_trc + i;
33638 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33639 lpfc_debugfs_enable = 0;
33640
33641 len = 0;
33642 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33643 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33644 (lpfc_debugfs_max_slow_ring_trc - 1);
33645 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33646 dtp = phba->slow_ring_trc + i;
33647 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33648 uint32_t *ptr;
33649 char buffer[1024];
33650
33651 + pax_track_stack();
33652 +
33653 off = 0;
33654 spin_lock_irq(&phba->hbalock);
33655
33656 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33657 !vport || !vport->disc_trc)
33658 return;
33659
33660 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33661 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33662 (lpfc_debugfs_max_disc_trc - 1);
33663 dtp = vport->disc_trc + index;
33664 dtp->fmt = fmt;
33665 dtp->data1 = data1;
33666 dtp->data2 = data2;
33667 dtp->data3 = data3;
33668 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33669 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33670 dtp->jif = jiffies;
33671 #endif
33672 return;
33673 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33674 !phba || !phba->slow_ring_trc)
33675 return;
33676
33677 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33678 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33679 (lpfc_debugfs_max_slow_ring_trc - 1);
33680 dtp = phba->slow_ring_trc + index;
33681 dtp->fmt = fmt;
33682 dtp->data1 = data1;
33683 dtp->data2 = data2;
33684 dtp->data3 = data3;
33685 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33686 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33687 dtp->jif = jiffies;
33688 #endif
33689 return;
33690 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33691 "slow_ring buffer\n");
33692 goto debug_failed;
33693 }
33694 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33695 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33696 memset(phba->slow_ring_trc, 0,
33697 (sizeof(struct lpfc_debugfs_trc) *
33698 lpfc_debugfs_max_slow_ring_trc));
33699 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33700 "buffer\n");
33701 goto debug_failed;
33702 }
33703 - atomic_set(&vport->disc_trc_cnt, 0);
33704 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33705
33706 snprintf(name, sizeof(name), "discovery_trace");
33707 vport->debug_disc_trc =
33708 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h
33709 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33710 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33711 @@ -400,7 +400,7 @@ struct lpfc_vport {
33712 struct dentry *debug_nodelist;
33713 struct dentry *vport_debugfs_root;
33714 struct lpfc_debugfs_trc *disc_trc;
33715 - atomic_t disc_trc_cnt;
33716 + atomic_unchecked_t disc_trc_cnt;
33717 #endif
33718 uint8_t stat_data_enabled;
33719 uint8_t stat_data_blocked;
33720 @@ -725,8 +725,8 @@ struct lpfc_hba {
33721 struct timer_list fabric_block_timer;
33722 unsigned long bit_flags;
33723 #define FABRIC_COMANDS_BLOCKED 0
33724 - atomic_t num_rsrc_err;
33725 - atomic_t num_cmd_success;
33726 + atomic_unchecked_t num_rsrc_err;
33727 + atomic_unchecked_t num_cmd_success;
33728 unsigned long last_rsrc_error_time;
33729 unsigned long last_ramp_down_time;
33730 unsigned long last_ramp_up_time;
33731 @@ -740,7 +740,7 @@ struct lpfc_hba {
33732 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33733 struct dentry *debug_slow_ring_trc;
33734 struct lpfc_debugfs_trc *slow_ring_trc;
33735 - atomic_t slow_ring_trc_cnt;
33736 + atomic_unchecked_t slow_ring_trc_cnt;
33737 #endif
33738
33739 /* Used for deferred freeing of ELS data buffers */
33740 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c
33741 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33742 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33743 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33744 uint32_t evt_posted;
33745
33746 spin_lock_irqsave(&phba->hbalock, flags);
33747 - atomic_inc(&phba->num_rsrc_err);
33748 + atomic_inc_unchecked(&phba->num_rsrc_err);
33749 phba->last_rsrc_error_time = jiffies;
33750
33751 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33752 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33753 unsigned long flags;
33754 struct lpfc_hba *phba = vport->phba;
33755 uint32_t evt_posted;
33756 - atomic_inc(&phba->num_cmd_success);
33757 + atomic_inc_unchecked(&phba->num_cmd_success);
33758
33759 if (vport->cfg_lun_queue_depth <= queue_depth)
33760 return;
33761 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33762 int i;
33763 struct lpfc_rport_data *rdata;
33764
33765 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33766 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33767 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33768 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33769
33770 vports = lpfc_create_vport_work_array(phba);
33771 if (vports != NULL)
33772 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33773 }
33774 }
33775 lpfc_destroy_vport_work_array(phba, vports);
33776 - atomic_set(&phba->num_rsrc_err, 0);
33777 - atomic_set(&phba->num_cmd_success, 0);
33778 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33779 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33780 }
33781
33782 /**
33783 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33784 }
33785 }
33786 lpfc_destroy_vport_work_array(phba, vports);
33787 - atomic_set(&phba->num_rsrc_err, 0);
33788 - atomic_set(&phba->num_cmd_success, 0);
33789 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33790 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33791 }
33792
33793 /**
33794 diff -urNp linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c
33795 --- linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33796 +++ linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33797 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33798 int rval;
33799 int i;
33800
33801 + pax_track_stack();
33802 +
33803 // Allocate memory for the base list of scb for management module.
33804 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33805
33806 diff -urNp linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c
33807 --- linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33808 +++ linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33809 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33810 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33811 int ret;
33812
33813 + pax_track_stack();
33814 +
33815 or = osd_start_request(od, GFP_KERNEL);
33816 if (!or)
33817 return -ENOMEM;
33818 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.c linux-2.6.32.41/drivers/scsi/pmcraid.c
33819 --- linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33820 +++ linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33821 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33822 res->scsi_dev = scsi_dev;
33823 scsi_dev->hostdata = res;
33824 res->change_detected = 0;
33825 - atomic_set(&res->read_failures, 0);
33826 - atomic_set(&res->write_failures, 0);
33827 + atomic_set_unchecked(&res->read_failures, 0);
33828 + atomic_set_unchecked(&res->write_failures, 0);
33829 rc = 0;
33830 }
33831 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33832 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33833
33834 /* If this was a SCSI read/write command keep count of errors */
33835 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33836 - atomic_inc(&res->read_failures);
33837 + atomic_inc_unchecked(&res->read_failures);
33838 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33839 - atomic_inc(&res->write_failures);
33840 + atomic_inc_unchecked(&res->write_failures);
33841
33842 if (!RES_IS_GSCSI(res->cfg_entry) &&
33843 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33844 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
33845
33846 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33847 /* add resources only after host is added into system */
33848 - if (!atomic_read(&pinstance->expose_resources))
33849 + if (!atomic_read_unchecked(&pinstance->expose_resources))
33850 return;
33851
33852 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
33853 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
33854 init_waitqueue_head(&pinstance->reset_wait_q);
33855
33856 atomic_set(&pinstance->outstanding_cmds, 0);
33857 - atomic_set(&pinstance->expose_resources, 0);
33858 + atomic_set_unchecked(&pinstance->expose_resources, 0);
33859
33860 INIT_LIST_HEAD(&pinstance->free_res_q);
33861 INIT_LIST_HEAD(&pinstance->used_res_q);
33862 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
33863 /* Schedule worker thread to handle CCN and take care of adding and
33864 * removing devices to OS
33865 */
33866 - atomic_set(&pinstance->expose_resources, 1);
33867 + atomic_set_unchecked(&pinstance->expose_resources, 1);
33868 schedule_work(&pinstance->worker_q);
33869 return rc;
33870
33871 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.h linux-2.6.32.41/drivers/scsi/pmcraid.h
33872 --- linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
33873 +++ linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
33874 @@ -690,7 +690,7 @@ struct pmcraid_instance {
33875 atomic_t outstanding_cmds;
33876
33877 /* should add/delete resources to mid-layer now ?*/
33878 - atomic_t expose_resources;
33879 + atomic_unchecked_t expose_resources;
33880
33881 /* Tasklet to handle deferred processing */
33882 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
33883 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
33884 struct list_head queue; /* link to "to be exposed" resources */
33885 struct pmcraid_config_table_entry cfg_entry;
33886 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33887 - atomic_t read_failures; /* count of failed READ commands */
33888 - atomic_t write_failures; /* count of failed WRITE commands */
33889 + atomic_unchecked_t read_failures; /* count of failed READ commands */
33890 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33891
33892 /* To indicate add/delete/modify during CCN */
33893 u8 change_detected;
33894 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h
33895 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
33896 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
33897 @@ -240,7 +240,7 @@ struct ddb_entry {
33898 atomic_t retry_relogin_timer; /* Min Time between relogins
33899 * (4000 only) */
33900 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33901 - atomic_t relogin_retry_count; /* Num of times relogin has been
33902 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33903 * retried */
33904
33905 uint16_t port;
33906 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c
33907 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
33908 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
33909 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
33910 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
33911 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33912 atomic_set(&ddb_entry->relogin_timer, 0);
33913 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33914 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33915 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33916 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33917 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33918 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
33919 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33920 atomic_set(&ddb_entry->port_down_timer,
33921 ha->port_down_retry_count);
33922 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33923 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33924 atomic_set(&ddb_entry->relogin_timer, 0);
33925 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33926 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
33927 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c
33928 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
33929 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
33930 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
33931 ddb_entry->fw_ddb_device_state ==
33932 DDB_DS_SESSION_FAILED) {
33933 /* Reset retry relogin timer */
33934 - atomic_inc(&ddb_entry->relogin_retry_count);
33935 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33936 DEBUG2(printk("scsi%ld: index[%d] relogin"
33937 " timed out-retrying"
33938 " relogin (%d)\n",
33939 ha->host_no,
33940 ddb_entry->fw_ddb_index,
33941 - atomic_read(&ddb_entry->
33942 + atomic_read_unchecked(&ddb_entry->
33943 relogin_retry_count))
33944 );
33945 start_dpc++;
33946 diff -urNp linux-2.6.32.41/drivers/scsi/scsi.c linux-2.6.32.41/drivers/scsi/scsi.c
33947 --- linux-2.6.32.41/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
33948 +++ linux-2.6.32.41/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
33949 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33950 unsigned long timeout;
33951 int rtn = 0;
33952
33953 - atomic_inc(&cmd->device->iorequest_cnt);
33954 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33955
33956 /* check if the device is still usable */
33957 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33958 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_debug.c linux-2.6.32.41/drivers/scsi/scsi_debug.c
33959 --- linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
33960 +++ linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
33961 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
33962 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33963 unsigned char *cmd = (unsigned char *)scp->cmnd;
33964
33965 + pax_track_stack();
33966 +
33967 if ((errsts = check_readiness(scp, 1, devip)))
33968 return errsts;
33969 memset(arr, 0, sizeof(arr));
33970 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
33971 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33972 unsigned char *cmd = (unsigned char *)scp->cmnd;
33973
33974 + pax_track_stack();
33975 +
33976 if ((errsts = check_readiness(scp, 1, devip)))
33977 return errsts;
33978 memset(arr, 0, sizeof(arr));
33979 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_lib.c linux-2.6.32.41/drivers/scsi/scsi_lib.c
33980 --- linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
33981 +++ linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
33982 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
33983
33984 scsi_init_cmd_errh(cmd);
33985 cmd->result = DID_NO_CONNECT << 16;
33986 - atomic_inc(&cmd->device->iorequest_cnt);
33987 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33988
33989 /*
33990 * SCSI request completion path will do scsi_device_unbusy(),
33991 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
33992 */
33993 cmd->serial_number = 0;
33994
33995 - atomic_inc(&cmd->device->iodone_cnt);
33996 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
33997 if (cmd->result)
33998 - atomic_inc(&cmd->device->ioerr_cnt);
33999 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34000
34001 disposition = scsi_decide_disposition(cmd);
34002 if (disposition != SUCCESS &&
34003 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_sysfs.c linux-2.6.32.41/drivers/scsi/scsi_sysfs.c
34004 --- linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:01.000000000 -0400
34005 +++ linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:33.000000000 -0400
34006 @@ -661,7 +661,7 @@ show_iostat_##field(struct device *dev,
34007 char *buf) \
34008 { \
34009 struct scsi_device *sdev = to_scsi_device(dev); \
34010 - unsigned long long count = atomic_read(&sdev->field); \
34011 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34012 return snprintf(buf, 20, "0x%llx\n", count); \
34013 } \
34014 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34015 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c
34016 --- linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34017 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34018 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34019 * Netlink Infrastructure
34020 */
34021
34022 -static atomic_t fc_event_seq;
34023 +static atomic_unchecked_t fc_event_seq;
34024
34025 /**
34026 * fc_get_event_number - Obtain the next sequential FC event number
34027 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34028 u32
34029 fc_get_event_number(void)
34030 {
34031 - return atomic_add_return(1, &fc_event_seq);
34032 + return atomic_add_return_unchecked(1, &fc_event_seq);
34033 }
34034 EXPORT_SYMBOL(fc_get_event_number);
34035
34036 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34037 {
34038 int error;
34039
34040 - atomic_set(&fc_event_seq, 0);
34041 + atomic_set_unchecked(&fc_event_seq, 0);
34042
34043 error = transport_class_register(&fc_host_class);
34044 if (error)
34045 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c
34046 --- linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34047 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34048 @@ -81,7 +81,7 @@ struct iscsi_internal {
34049 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34050 };
34051
34052 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34053 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34054 static struct workqueue_struct *iscsi_eh_timer_workq;
34055
34056 /*
34057 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34058 int err;
34059
34060 ihost = shost->shost_data;
34061 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34062 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34063
34064 if (id == ISCSI_MAX_TARGET) {
34065 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34066 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34067 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34068 ISCSI_TRANSPORT_VERSION);
34069
34070 - atomic_set(&iscsi_session_nr, 0);
34071 + atomic_set_unchecked(&iscsi_session_nr, 0);
34072
34073 err = class_register(&iscsi_transport_class);
34074 if (err)
34075 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c
34076 --- linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34077 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34078 @@ -33,7 +33,7 @@
34079 #include "scsi_transport_srp_internal.h"
34080
34081 struct srp_host_attrs {
34082 - atomic_t next_port_id;
34083 + atomic_unchecked_t next_port_id;
34084 };
34085 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34086
34087 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34088 struct Scsi_Host *shost = dev_to_shost(dev);
34089 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34090
34091 - atomic_set(&srp_host->next_port_id, 0);
34092 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34093 return 0;
34094 }
34095
34096 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34097 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34098 rport->roles = ids->roles;
34099
34100 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34101 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34102 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34103
34104 transport_setup_device(&rport->dev);
34105 diff -urNp linux-2.6.32.41/drivers/scsi/sg.c linux-2.6.32.41/drivers/scsi/sg.c
34106 --- linux-2.6.32.41/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34107 +++ linux-2.6.32.41/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34108 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34109 const struct file_operations * fops;
34110 };
34111
34112 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34113 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34114 {"allow_dio", &adio_fops},
34115 {"debug", &debug_fops},
34116 {"def_reserved_size", &dressz_fops},
34117 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34118 {
34119 int k, mask;
34120 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34121 - struct sg_proc_leaf * leaf;
34122 + const struct sg_proc_leaf * leaf;
34123
34124 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34125 if (!sg_proc_sgp)
34126 diff -urNp linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c
34127 --- linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34128 +++ linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34129 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34130 int do_iounmap = 0;
34131 int do_disable_device = 1;
34132
34133 + pax_track_stack();
34134 +
34135 memset(&sym_dev, 0, sizeof(sym_dev));
34136 memset(&nvram, 0, sizeof(nvram));
34137 sym_dev.pdev = pdev;
34138 diff -urNp linux-2.6.32.41/drivers/serial/kgdboc.c linux-2.6.32.41/drivers/serial/kgdboc.c
34139 --- linux-2.6.32.41/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34140 +++ linux-2.6.32.41/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34141 @@ -18,7 +18,7 @@
34142
34143 #define MAX_CONFIG_LEN 40
34144
34145 -static struct kgdb_io kgdboc_io_ops;
34146 +static const struct kgdb_io kgdboc_io_ops;
34147
34148 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34149 static int configured = -1;
34150 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34151 module_put(THIS_MODULE);
34152 }
34153
34154 -static struct kgdb_io kgdboc_io_ops = {
34155 +static const struct kgdb_io kgdboc_io_ops = {
34156 .name = "kgdboc",
34157 .read_char = kgdboc_get_char,
34158 .write_char = kgdboc_put_char,
34159 diff -urNp linux-2.6.32.41/drivers/spi/spi.c linux-2.6.32.41/drivers/spi/spi.c
34160 --- linux-2.6.32.41/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34161 +++ linux-2.6.32.41/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34162 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34163 EXPORT_SYMBOL_GPL(spi_sync);
34164
34165 /* portable code must never pass more than 32 bytes */
34166 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34167 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34168
34169 static u8 *buf;
34170
34171 diff -urNp linux-2.6.32.41/drivers/staging/android/binder.c linux-2.6.32.41/drivers/staging/android/binder.c
34172 --- linux-2.6.32.41/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34173 +++ linux-2.6.32.41/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34174 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34175 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34176 }
34177
34178 -static struct vm_operations_struct binder_vm_ops = {
34179 +static const struct vm_operations_struct binder_vm_ops = {
34180 .open = binder_vma_open,
34181 .close = binder_vma_close,
34182 };
34183 diff -urNp linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c
34184 --- linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34185 +++ linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34186 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34187 return VM_FAULT_NOPAGE;
34188 }
34189
34190 -static struct vm_operations_struct b3dfg_vm_ops = {
34191 +static const struct vm_operations_struct b3dfg_vm_ops = {
34192 .fault = b3dfg_vma_fault,
34193 };
34194
34195 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34196 return r;
34197 }
34198
34199 -static struct file_operations b3dfg_fops = {
34200 +static const struct file_operations b3dfg_fops = {
34201 .owner = THIS_MODULE,
34202 .open = b3dfg_open,
34203 .release = b3dfg_release,
34204 diff -urNp linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c
34205 --- linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34206 +++ linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34207 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34208 mutex_unlock(&dev->mutex);
34209 }
34210
34211 -static struct vm_operations_struct comedi_vm_ops = {
34212 +static const struct vm_operations_struct comedi_vm_ops = {
34213 .close = comedi_unmap,
34214 };
34215
34216 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c
34217 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34218 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34219 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34220 static dev_t adsp_devno;
34221 static struct class *adsp_class;
34222
34223 -static struct file_operations adsp_fops = {
34224 +static const struct file_operations adsp_fops = {
34225 .owner = THIS_MODULE,
34226 .open = adsp_open,
34227 .unlocked_ioctl = adsp_ioctl,
34228 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c
34229 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34230 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34231 @@ -1022,7 +1022,7 @@ done:
34232 return rc;
34233 }
34234
34235 -static struct file_operations audio_aac_fops = {
34236 +static const struct file_operations audio_aac_fops = {
34237 .owner = THIS_MODULE,
34238 .open = audio_open,
34239 .release = audio_release,
34240 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c
34241 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34242 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34243 @@ -833,7 +833,7 @@ done:
34244 return rc;
34245 }
34246
34247 -static struct file_operations audio_amrnb_fops = {
34248 +static const struct file_operations audio_amrnb_fops = {
34249 .owner = THIS_MODULE,
34250 .open = audamrnb_open,
34251 .release = audamrnb_release,
34252 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c
34253 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34254 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34255 @@ -805,7 +805,7 @@ dma_fail:
34256 return rc;
34257 }
34258
34259 -static struct file_operations audio_evrc_fops = {
34260 +static const struct file_operations audio_evrc_fops = {
34261 .owner = THIS_MODULE,
34262 .open = audevrc_open,
34263 .release = audevrc_release,
34264 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c
34265 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34266 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34267 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34268 return 0;
34269 }
34270
34271 -static struct file_operations audio_fops = {
34272 +static const struct file_operations audio_fops = {
34273 .owner = THIS_MODULE,
34274 .open = audio_in_open,
34275 .release = audio_in_release,
34276 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34277 .unlocked_ioctl = audio_in_ioctl,
34278 };
34279
34280 -static struct file_operations audpre_fops = {
34281 +static const struct file_operations audpre_fops = {
34282 .owner = THIS_MODULE,
34283 .open = audpre_open,
34284 .unlocked_ioctl = audpre_ioctl,
34285 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c
34286 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34287 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34288 @@ -941,7 +941,7 @@ done:
34289 return rc;
34290 }
34291
34292 -static struct file_operations audio_mp3_fops = {
34293 +static const struct file_operations audio_mp3_fops = {
34294 .owner = THIS_MODULE,
34295 .open = audio_open,
34296 .release = audio_release,
34297 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c
34298 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34299 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34300 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34301 return 0;
34302 }
34303
34304 -static struct file_operations audio_fops = {
34305 +static const struct file_operations audio_fops = {
34306 .owner = THIS_MODULE,
34307 .open = audio_open,
34308 .release = audio_release,
34309 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34310 .unlocked_ioctl = audio_ioctl,
34311 };
34312
34313 -static struct file_operations audpp_fops = {
34314 +static const struct file_operations audpp_fops = {
34315 .owner = THIS_MODULE,
34316 .open = audpp_open,
34317 .unlocked_ioctl = audpp_ioctl,
34318 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c
34319 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34320 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34321 @@ -816,7 +816,7 @@ err:
34322 return rc;
34323 }
34324
34325 -static struct file_operations audio_qcelp_fops = {
34326 +static const struct file_operations audio_qcelp_fops = {
34327 .owner = THIS_MODULE,
34328 .open = audqcelp_open,
34329 .release = audqcelp_release,
34330 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c
34331 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34332 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34333 @@ -242,7 +242,7 @@ err:
34334 return rc;
34335 }
34336
34337 -static struct file_operations snd_fops = {
34338 +static const struct file_operations snd_fops = {
34339 .owner = THIS_MODULE,
34340 .open = snd_open,
34341 .release = snd_release,
34342 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c
34343 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34344 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34345 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34346 return 0;
34347 }
34348
34349 -static struct file_operations qmi_fops = {
34350 +static const struct file_operations qmi_fops = {
34351 .owner = THIS_MODULE,
34352 .read = qmi_read,
34353 .write = qmi_write,
34354 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c
34355 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34356 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34357 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34358 return rc;
34359 }
34360
34361 -static struct file_operations rpcrouter_server_fops = {
34362 +static const struct file_operations rpcrouter_server_fops = {
34363 .owner = THIS_MODULE,
34364 .open = rpcrouter_open,
34365 .release = rpcrouter_release,
34366 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34367 .unlocked_ioctl = rpcrouter_ioctl,
34368 };
34369
34370 -static struct file_operations rpcrouter_router_fops = {
34371 +static const struct file_operations rpcrouter_router_fops = {
34372 .owner = THIS_MODULE,
34373 .open = rpcrouter_open,
34374 .release = rpcrouter_release,
34375 diff -urNp linux-2.6.32.41/drivers/staging/dst/dcore.c linux-2.6.32.41/drivers/staging/dst/dcore.c
34376 --- linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34377 +++ linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34378 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34379 return 0;
34380 }
34381
34382 -static struct block_device_operations dst_blk_ops = {
34383 +static const struct block_device_operations dst_blk_ops = {
34384 .open = dst_bdev_open,
34385 .release = dst_bdev_release,
34386 .owner = THIS_MODULE,
34387 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34388 n->size = ctl->size;
34389
34390 atomic_set(&n->refcnt, 1);
34391 - atomic_long_set(&n->gen, 0);
34392 + atomic_long_set_unchecked(&n->gen, 0);
34393 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34394
34395 err = dst_node_sysfs_init(n);
34396 diff -urNp linux-2.6.32.41/drivers/staging/dst/trans.c linux-2.6.32.41/drivers/staging/dst/trans.c
34397 --- linux-2.6.32.41/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34398 +++ linux-2.6.32.41/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34399 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34400 t->error = 0;
34401 t->retries = 0;
34402 atomic_set(&t->refcnt, 1);
34403 - t->gen = atomic_long_inc_return(&n->gen);
34404 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34405
34406 t->enc = bio_data_dir(bio);
34407 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34408 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c
34409 --- linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34410 +++ linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34411 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34412 struct net_device_stats *stats = &etdev->net_stats;
34413
34414 if (pMpTcb->Flags & fMP_DEST_BROAD)
34415 - atomic_inc(&etdev->Stats.brdcstxmt);
34416 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34417 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34418 - atomic_inc(&etdev->Stats.multixmt);
34419 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34420 else
34421 - atomic_inc(&etdev->Stats.unixmt);
34422 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34423
34424 if (pMpTcb->Packet) {
34425 stats->tx_bytes += pMpTcb->Packet->len;
34426 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h
34427 --- linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34428 +++ linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34429 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34430 * operations
34431 */
34432 u32 unircv; /* # multicast packets received */
34433 - atomic_t unixmt; /* # multicast packets for Tx */
34434 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34435 u32 multircv; /* # multicast packets received */
34436 - atomic_t multixmt; /* # multicast packets for Tx */
34437 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34438 u32 brdcstrcv; /* # broadcast packets received */
34439 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34440 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34441 u32 norcvbuf; /* # Rx packets discarded */
34442 u32 noxmtbuf; /* # Tx packets discarded */
34443
34444 diff -urNp linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c
34445 --- linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34446 +++ linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34447 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34448 return 0;
34449 }
34450
34451 -static struct vm_operations_struct go7007_vm_ops = {
34452 +static const struct vm_operations_struct go7007_vm_ops = {
34453 .open = go7007_vm_open,
34454 .close = go7007_vm_close,
34455 .fault = go7007_vm_fault,
34456 diff -urNp linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c
34457 --- linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34458 +++ linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34459 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34460 /* The one and only one */
34461 static struct blkvsc_driver_context g_blkvsc_drv;
34462
34463 -static struct block_device_operations block_ops = {
34464 +static const struct block_device_operations block_ops = {
34465 .owner = THIS_MODULE,
34466 .open = blkvsc_open,
34467 .release = blkvsc_release,
34468 diff -urNp linux-2.6.32.41/drivers/staging/hv/Channel.c linux-2.6.32.41/drivers/staging/hv/Channel.c
34469 --- linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34470 +++ linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34471 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34472
34473 DPRINT_ENTER(VMBUS);
34474
34475 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34476 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34477 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34478 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34479
34480 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34481 ASSERT(msgInfo != NULL);
34482 diff -urNp linux-2.6.32.41/drivers/staging/hv/Hv.c linux-2.6.32.41/drivers/staging/hv/Hv.c
34483 --- linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34484 +++ linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34485 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34486 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34487 u32 outputAddressHi = outputAddress >> 32;
34488 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34489 - volatile void *hypercallPage = gHvContext.HypercallPage;
34490 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34491
34492 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34493 Control, Input, Output);
34494 diff -urNp linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c
34495 --- linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34496 +++ linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34497 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34498 to_device_context(root_device_obj);
34499 struct device_context *child_device_ctx =
34500 to_device_context(child_device_obj);
34501 - static atomic_t device_num = ATOMIC_INIT(0);
34502 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34503
34504 DPRINT_ENTER(VMBUS_DRV);
34505
34506 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34507
34508 /* Set the device name. Otherwise, device_register() will fail. */
34509 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34510 - atomic_inc_return(&device_num));
34511 + atomic_inc_return_unchecked(&device_num));
34512
34513 /* The new device belongs to this bus */
34514 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34515 diff -urNp linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h
34516 --- linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34517 +++ linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34518 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34519 struct VMBUS_CONNECTION {
34520 enum VMBUS_CONNECT_STATE ConnectState;
34521
34522 - atomic_t NextGpadlHandle;
34523 + atomic_unchecked_t NextGpadlHandle;
34524
34525 /*
34526 * Represents channel interrupts. Each bit position represents a
34527 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet.c linux-2.6.32.41/drivers/staging/octeon/ethernet.c
34528 --- linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34529 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34530 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34531 * since the RX tasklet also increments it.
34532 */
34533 #ifdef CONFIG_64BIT
34534 - atomic64_add(rx_status.dropped_packets,
34535 - (atomic64_t *)&priv->stats.rx_dropped);
34536 + atomic64_add_unchecked(rx_status.dropped_packets,
34537 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34538 #else
34539 - atomic_add(rx_status.dropped_packets,
34540 - (atomic_t *)&priv->stats.rx_dropped);
34541 + atomic_add_unchecked(rx_status.dropped_packets,
34542 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34543 #endif
34544 }
34545
34546 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c
34547 --- linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34548 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34549 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34550 /* Increment RX stats for virtual ports */
34551 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34552 #ifdef CONFIG_64BIT
34553 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34554 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34555 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34556 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34557 #else
34558 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34559 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34560 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34561 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34562 #endif
34563 }
34564 netif_receive_skb(skb);
34565 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34566 dev->name);
34567 */
34568 #ifdef CONFIG_64BIT
34569 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34570 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34571 #else
34572 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34573 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34574 #endif
34575 dev_kfree_skb_irq(skb);
34576 }
34577 diff -urNp linux-2.6.32.41/drivers/staging/panel/panel.c linux-2.6.32.41/drivers/staging/panel/panel.c
34578 --- linux-2.6.32.41/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34579 +++ linux-2.6.32.41/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34580 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34581 return 0;
34582 }
34583
34584 -static struct file_operations lcd_fops = {
34585 +static const struct file_operations lcd_fops = {
34586 .write = lcd_write,
34587 .open = lcd_open,
34588 .release = lcd_release,
34589 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34590 return 0;
34591 }
34592
34593 -static struct file_operations keypad_fops = {
34594 +static const struct file_operations keypad_fops = {
34595 .read = keypad_read, /* read */
34596 .open = keypad_open, /* open */
34597 .release = keypad_release, /* close */
34598 diff -urNp linux-2.6.32.41/drivers/staging/phison/phison.c linux-2.6.32.41/drivers/staging/phison/phison.c
34599 --- linux-2.6.32.41/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34600 +++ linux-2.6.32.41/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34601 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34602 ATA_BMDMA_SHT(DRV_NAME),
34603 };
34604
34605 -static struct ata_port_operations phison_ops = {
34606 +static const struct ata_port_operations phison_ops = {
34607 .inherits = &ata_bmdma_port_ops,
34608 .prereset = phison_pre_reset,
34609 };
34610 diff -urNp linux-2.6.32.41/drivers/staging/poch/poch.c linux-2.6.32.41/drivers/staging/poch/poch.c
34611 --- linux-2.6.32.41/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34612 +++ linux-2.6.32.41/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34613 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34614 return 0;
34615 }
34616
34617 -static struct file_operations poch_fops = {
34618 +static const struct file_operations poch_fops = {
34619 .owner = THIS_MODULE,
34620 .open = poch_open,
34621 .release = poch_release,
34622 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/inode.c linux-2.6.32.41/drivers/staging/pohmelfs/inode.c
34623 --- linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34624 +++ linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34625 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34626 mutex_init(&psb->mcache_lock);
34627 psb->mcache_root = RB_ROOT;
34628 psb->mcache_timeout = msecs_to_jiffies(5000);
34629 - atomic_long_set(&psb->mcache_gen, 0);
34630 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34631
34632 psb->trans_max_pages = 100;
34633
34634 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34635 INIT_LIST_HEAD(&psb->crypto_ready_list);
34636 INIT_LIST_HEAD(&psb->crypto_active_list);
34637
34638 - atomic_set(&psb->trans_gen, 1);
34639 + atomic_set_unchecked(&psb->trans_gen, 1);
34640 atomic_long_set(&psb->total_inodes, 0);
34641
34642 mutex_init(&psb->state_lock);
34643 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c
34644 --- linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34645 +++ linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34646 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34647 m->data = data;
34648 m->start = start;
34649 m->size = size;
34650 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34651 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34652
34653 mutex_lock(&psb->mcache_lock);
34654 err = pohmelfs_mcache_insert(psb, m);
34655 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h
34656 --- linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34657 +++ linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34658 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34659 struct pohmelfs_sb {
34660 struct rb_root mcache_root;
34661 struct mutex mcache_lock;
34662 - atomic_long_t mcache_gen;
34663 + atomic_long_unchecked_t mcache_gen;
34664 unsigned long mcache_timeout;
34665
34666 unsigned int idx;
34667
34668 unsigned int trans_retries;
34669
34670 - atomic_t trans_gen;
34671 + atomic_unchecked_t trans_gen;
34672
34673 unsigned int crypto_attached_size;
34674 unsigned int crypto_align_size;
34675 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/trans.c linux-2.6.32.41/drivers/staging/pohmelfs/trans.c
34676 --- linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34677 +++ linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34678 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34679 int err;
34680 struct netfs_cmd *cmd = t->iovec.iov_base;
34681
34682 - t->gen = atomic_inc_return(&psb->trans_gen);
34683 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34684
34685 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34686 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34687 diff -urNp linux-2.6.32.41/drivers/staging/sep/sep_driver.c linux-2.6.32.41/drivers/staging/sep/sep_driver.c
34688 --- linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34689 +++ linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34690 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34691 static dev_t sep_devno;
34692
34693 /* the files operations structure of the driver */
34694 -static struct file_operations sep_file_operations = {
34695 +static const struct file_operations sep_file_operations = {
34696 .owner = THIS_MODULE,
34697 .ioctl = sep_ioctl,
34698 .poll = sep_poll,
34699 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci.h linux-2.6.32.41/drivers/staging/usbip/vhci.h
34700 --- linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34701 +++ linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34702 @@ -92,7 +92,7 @@ struct vhci_hcd {
34703 unsigned resuming:1;
34704 unsigned long re_timeout;
34705
34706 - atomic_t seqnum;
34707 + atomic_unchecked_t seqnum;
34708
34709 /*
34710 * NOTE:
34711 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c
34712 --- linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34713 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34714 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34715 return;
34716 }
34717
34718 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34719 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34720 if (priv->seqnum == 0xffff)
34721 usbip_uinfo("seqnum max\n");
34722
34723 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34724 return -ENOMEM;
34725 }
34726
34727 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34728 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34729 if (unlink->seqnum == 0xffff)
34730 usbip_uinfo("seqnum max\n");
34731
34732 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34733 vdev->rhport = rhport;
34734 }
34735
34736 - atomic_set(&vhci->seqnum, 0);
34737 + atomic_set_unchecked(&vhci->seqnum, 0);
34738 spin_lock_init(&vhci->lock);
34739
34740
34741 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c
34742 --- linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34743 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34744 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34745 usbip_uerr("cannot find a urb of seqnum %u\n",
34746 pdu->base.seqnum);
34747 usbip_uinfo("max seqnum %d\n",
34748 - atomic_read(&the_controller->seqnum));
34749 + atomic_read_unchecked(&the_controller->seqnum));
34750 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34751 return;
34752 }
34753 diff -urNp linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c
34754 --- linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34755 +++ linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34756 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34757 static int __init vme_user_probe(struct device *, int, int);
34758 static int __exit vme_user_remove(struct device *, int, int);
34759
34760 -static struct file_operations vme_user_fops = {
34761 +static const struct file_operations vme_user_fops = {
34762 .open = vme_user_open,
34763 .release = vme_user_release,
34764 .read = vme_user_read,
34765 diff -urNp linux-2.6.32.41/drivers/telephony/ixj.c linux-2.6.32.41/drivers/telephony/ixj.c
34766 --- linux-2.6.32.41/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34767 +++ linux-2.6.32.41/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34768 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34769 bool mContinue;
34770 char *pIn, *pOut;
34771
34772 + pax_track_stack();
34773 +
34774 if (!SCI_Prepare(j))
34775 return 0;
34776
34777 diff -urNp linux-2.6.32.41/drivers/uio/uio.c linux-2.6.32.41/drivers/uio/uio.c
34778 --- linux-2.6.32.41/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34779 +++ linux-2.6.32.41/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34780 @@ -23,6 +23,7 @@
34781 #include <linux/string.h>
34782 #include <linux/kobject.h>
34783 #include <linux/uio_driver.h>
34784 +#include <asm/local.h>
34785
34786 #define UIO_MAX_DEVICES 255
34787
34788 @@ -30,10 +31,10 @@ struct uio_device {
34789 struct module *owner;
34790 struct device *dev;
34791 int minor;
34792 - atomic_t event;
34793 + atomic_unchecked_t event;
34794 struct fasync_struct *async_queue;
34795 wait_queue_head_t wait;
34796 - int vma_count;
34797 + local_t vma_count;
34798 struct uio_info *info;
34799 struct kobject *map_dir;
34800 struct kobject *portio_dir;
34801 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34802 return entry->show(mem, buf);
34803 }
34804
34805 -static struct sysfs_ops map_sysfs_ops = {
34806 +static const struct sysfs_ops map_sysfs_ops = {
34807 .show = map_type_show,
34808 };
34809
34810 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34811 return entry->show(port, buf);
34812 }
34813
34814 -static struct sysfs_ops portio_sysfs_ops = {
34815 +static const struct sysfs_ops portio_sysfs_ops = {
34816 .show = portio_type_show,
34817 };
34818
34819 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34820 struct uio_device *idev = dev_get_drvdata(dev);
34821 if (idev)
34822 return sprintf(buf, "%u\n",
34823 - (unsigned int)atomic_read(&idev->event));
34824 + (unsigned int)atomic_read_unchecked(&idev->event));
34825 else
34826 return -ENODEV;
34827 }
34828 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34829 {
34830 struct uio_device *idev = info->uio_dev;
34831
34832 - atomic_inc(&idev->event);
34833 + atomic_inc_unchecked(&idev->event);
34834 wake_up_interruptible(&idev->wait);
34835 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34836 }
34837 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
34838 }
34839
34840 listener->dev = idev;
34841 - listener->event_count = atomic_read(&idev->event);
34842 + listener->event_count = atomic_read_unchecked(&idev->event);
34843 filep->private_data = listener;
34844
34845 if (idev->info->open) {
34846 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
34847 return -EIO;
34848
34849 poll_wait(filep, &idev->wait, wait);
34850 - if (listener->event_count != atomic_read(&idev->event))
34851 + if (listener->event_count != atomic_read_unchecked(&idev->event))
34852 return POLLIN | POLLRDNORM;
34853 return 0;
34854 }
34855 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
34856 do {
34857 set_current_state(TASK_INTERRUPTIBLE);
34858
34859 - event_count = atomic_read(&idev->event);
34860 + event_count = atomic_read_unchecked(&idev->event);
34861 if (event_count != listener->event_count) {
34862 if (copy_to_user(buf, &event_count, count))
34863 retval = -EFAULT;
34864 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
34865 static void uio_vma_open(struct vm_area_struct *vma)
34866 {
34867 struct uio_device *idev = vma->vm_private_data;
34868 - idev->vma_count++;
34869 + local_inc(&idev->vma_count);
34870 }
34871
34872 static void uio_vma_close(struct vm_area_struct *vma)
34873 {
34874 struct uio_device *idev = vma->vm_private_data;
34875 - idev->vma_count--;
34876 + local_dec(&idev->vma_count);
34877 }
34878
34879 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34880 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
34881 idev->owner = owner;
34882 idev->info = info;
34883 init_waitqueue_head(&idev->wait);
34884 - atomic_set(&idev->event, 0);
34885 + atomic_set_unchecked(&idev->event, 0);
34886
34887 ret = uio_get_minor(idev);
34888 if (ret)
34889 diff -urNp linux-2.6.32.41/drivers/usb/atm/usbatm.c linux-2.6.32.41/drivers/usb/atm/usbatm.c
34890 --- linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
34891 +++ linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
34892 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
34893 if (printk_ratelimit())
34894 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34895 __func__, vpi, vci);
34896 - atomic_inc(&vcc->stats->rx_err);
34897 + atomic_inc_unchecked(&vcc->stats->rx_err);
34898 return;
34899 }
34900
34901 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
34902 if (length > ATM_MAX_AAL5_PDU) {
34903 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34904 __func__, length, vcc);
34905 - atomic_inc(&vcc->stats->rx_err);
34906 + atomic_inc_unchecked(&vcc->stats->rx_err);
34907 goto out;
34908 }
34909
34910 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
34911 if (sarb->len < pdu_length) {
34912 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34913 __func__, pdu_length, sarb->len, vcc);
34914 - atomic_inc(&vcc->stats->rx_err);
34915 + atomic_inc_unchecked(&vcc->stats->rx_err);
34916 goto out;
34917 }
34918
34919 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34920 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34921 __func__, vcc);
34922 - atomic_inc(&vcc->stats->rx_err);
34923 + atomic_inc_unchecked(&vcc->stats->rx_err);
34924 goto out;
34925 }
34926
34927 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
34928 if (printk_ratelimit())
34929 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34930 __func__, length);
34931 - atomic_inc(&vcc->stats->rx_drop);
34932 + atomic_inc_unchecked(&vcc->stats->rx_drop);
34933 goto out;
34934 }
34935
34936 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
34937
34938 vcc->push(vcc, skb);
34939
34940 - atomic_inc(&vcc->stats->rx);
34941 + atomic_inc_unchecked(&vcc->stats->rx);
34942 out:
34943 skb_trim(sarb, 0);
34944 }
34945 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
34946 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
34947
34948 usbatm_pop(vcc, skb);
34949 - atomic_inc(&vcc->stats->tx);
34950 + atomic_inc_unchecked(&vcc->stats->tx);
34951
34952 skb = skb_dequeue(&instance->sndqueue);
34953 }
34954 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
34955 if (!left--)
34956 return sprintf(page,
34957 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
34958 - atomic_read(&atm_dev->stats.aal5.tx),
34959 - atomic_read(&atm_dev->stats.aal5.tx_err),
34960 - atomic_read(&atm_dev->stats.aal5.rx),
34961 - atomic_read(&atm_dev->stats.aal5.rx_err),
34962 - atomic_read(&atm_dev->stats.aal5.rx_drop));
34963 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
34964 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
34965 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
34966 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
34967 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
34968
34969 if (!left--) {
34970 if (instance->disconnected)
34971 diff -urNp linux-2.6.32.41/drivers/usb/class/cdc-wdm.c linux-2.6.32.41/drivers/usb/class/cdc-wdm.c
34972 --- linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
34973 +++ linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
34974 @@ -314,7 +314,7 @@ static ssize_t wdm_write
34975 if (r < 0)
34976 goto outnp;
34977
34978 - if (!file->f_flags && O_NONBLOCK)
34979 + if (!(file->f_flags & O_NONBLOCK))
34980 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
34981 &desc->flags));
34982 else
34983 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.c linux-2.6.32.41/drivers/usb/core/hcd.c
34984 --- linux-2.6.32.41/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
34985 +++ linux-2.6.32.41/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
34986 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
34987
34988 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
34989
34990 -struct usb_mon_operations *mon_ops;
34991 +const struct usb_mon_operations *mon_ops;
34992
34993 /*
34994 * The registration is unlocked.
34995 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
34996 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
34997 */
34998
34999 -int usb_mon_register (struct usb_mon_operations *ops)
35000 +int usb_mon_register (const struct usb_mon_operations *ops)
35001 {
35002
35003 if (mon_ops)
35004 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.h linux-2.6.32.41/drivers/usb/core/hcd.h
35005 --- linux-2.6.32.41/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35006 +++ linux-2.6.32.41/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35007 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35008 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35009
35010 struct usb_mon_operations {
35011 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35012 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35013 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35014 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35015 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35016 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35017 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35018 };
35019
35020 -extern struct usb_mon_operations *mon_ops;
35021 +extern const struct usb_mon_operations *mon_ops;
35022
35023 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35024 {
35025 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35026 (*mon_ops->urb_complete)(bus, urb, status);
35027 }
35028
35029 -int usb_mon_register(struct usb_mon_operations *ops);
35030 +int usb_mon_register(const struct usb_mon_operations *ops);
35031 void usb_mon_deregister(void);
35032
35033 #else
35034 diff -urNp linux-2.6.32.41/drivers/usb/core/message.c linux-2.6.32.41/drivers/usb/core/message.c
35035 --- linux-2.6.32.41/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35036 +++ linux-2.6.32.41/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35037 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35038 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35039 if (buf) {
35040 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35041 - if (len > 0) {
35042 - smallbuf = kmalloc(++len, GFP_NOIO);
35043 + if (len++ > 0) {
35044 + smallbuf = kmalloc(len, GFP_NOIO);
35045 if (!smallbuf)
35046 return buf;
35047 memcpy(smallbuf, buf, len);
35048 diff -urNp linux-2.6.32.41/drivers/usb/misc/appledisplay.c linux-2.6.32.41/drivers/usb/misc/appledisplay.c
35049 --- linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35050 +++ linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35051 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35052 return pdata->msgdata[1];
35053 }
35054
35055 -static struct backlight_ops appledisplay_bl_data = {
35056 +static const struct backlight_ops appledisplay_bl_data = {
35057 .get_brightness = appledisplay_bl_get_brightness,
35058 .update_status = appledisplay_bl_update_status,
35059 };
35060 diff -urNp linux-2.6.32.41/drivers/usb/mon/mon_main.c linux-2.6.32.41/drivers/usb/mon/mon_main.c
35061 --- linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35062 +++ linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35063 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35064 /*
35065 * Ops
35066 */
35067 -static struct usb_mon_operations mon_ops_0 = {
35068 +static const struct usb_mon_operations mon_ops_0 = {
35069 .urb_submit = mon_submit,
35070 .urb_submit_error = mon_submit_error,
35071 .urb_complete = mon_complete,
35072 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h
35073 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35074 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35075 @@ -192,7 +192,7 @@ struct wahc {
35076 struct list_head xfer_delayed_list;
35077 spinlock_t xfer_list_lock;
35078 struct work_struct xfer_work;
35079 - atomic_t xfer_id_count;
35080 + atomic_unchecked_t xfer_id_count;
35081 };
35082
35083
35084 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35085 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35086 spin_lock_init(&wa->xfer_list_lock);
35087 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35088 - atomic_set(&wa->xfer_id_count, 1);
35089 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35090 }
35091
35092 /**
35093 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c
35094 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35095 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35096 @@ -293,7 +293,7 @@ out:
35097 */
35098 static void wa_xfer_id_init(struct wa_xfer *xfer)
35099 {
35100 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35101 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35102 }
35103
35104 /*
35105 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/messages.c linux-2.6.32.41/drivers/uwb/wlp/messages.c
35106 --- linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35107 +++ linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35108 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35109 size_t len = skb->len;
35110 size_t used;
35111 ssize_t result;
35112 - struct wlp_nonce enonce, rnonce;
35113 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35114 enum wlp_assc_error assc_err;
35115 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35116 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35117 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/sysfs.c linux-2.6.32.41/drivers/uwb/wlp/sysfs.c
35118 --- linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35119 +++ linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35120 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35121 return ret;
35122 }
35123
35124 -static
35125 -struct sysfs_ops wss_sysfs_ops = {
35126 +static const struct sysfs_ops wss_sysfs_ops = {
35127 .show = wlp_wss_attr_show,
35128 .store = wlp_wss_attr_store,
35129 };
35130 diff -urNp linux-2.6.32.41/drivers/video/atmel_lcdfb.c linux-2.6.32.41/drivers/video/atmel_lcdfb.c
35131 --- linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35132 +++ linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35133 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35134 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35135 }
35136
35137 -static struct backlight_ops atmel_lcdc_bl_ops = {
35138 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35139 .update_status = atmel_bl_update_status,
35140 .get_brightness = atmel_bl_get_brightness,
35141 };
35142 diff -urNp linux-2.6.32.41/drivers/video/aty/aty128fb.c linux-2.6.32.41/drivers/video/aty/aty128fb.c
35143 --- linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35144 +++ linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35145 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35146 return bd->props.brightness;
35147 }
35148
35149 -static struct backlight_ops aty128_bl_data = {
35150 +static const struct backlight_ops aty128_bl_data = {
35151 .get_brightness = aty128_bl_get_brightness,
35152 .update_status = aty128_bl_update_status,
35153 };
35154 diff -urNp linux-2.6.32.41/drivers/video/aty/atyfb_base.c linux-2.6.32.41/drivers/video/aty/atyfb_base.c
35155 --- linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35156 +++ linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35157 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35158 return bd->props.brightness;
35159 }
35160
35161 -static struct backlight_ops aty_bl_data = {
35162 +static const struct backlight_ops aty_bl_data = {
35163 .get_brightness = aty_bl_get_brightness,
35164 .update_status = aty_bl_update_status,
35165 };
35166 diff -urNp linux-2.6.32.41/drivers/video/aty/radeon_backlight.c linux-2.6.32.41/drivers/video/aty/radeon_backlight.c
35167 --- linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35168 +++ linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35169 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35170 return bd->props.brightness;
35171 }
35172
35173 -static struct backlight_ops radeon_bl_data = {
35174 +static const struct backlight_ops radeon_bl_data = {
35175 .get_brightness = radeon_bl_get_brightness,
35176 .update_status = radeon_bl_update_status,
35177 };
35178 diff -urNp linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c
35179 --- linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35180 +++ linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35181 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35182 return error ? data->current_brightness : reg_val;
35183 }
35184
35185 -static struct backlight_ops adp5520_bl_ops = {
35186 +static const struct backlight_ops adp5520_bl_ops = {
35187 .update_status = adp5520_bl_update_status,
35188 .get_brightness = adp5520_bl_get_brightness,
35189 };
35190 diff -urNp linux-2.6.32.41/drivers/video/backlight/adx_bl.c linux-2.6.32.41/drivers/video/backlight/adx_bl.c
35191 --- linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35192 +++ linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35193 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35194 return 1;
35195 }
35196
35197 -static struct backlight_ops adx_backlight_ops = {
35198 +static const struct backlight_ops adx_backlight_ops = {
35199 .options = 0,
35200 .update_status = adx_backlight_update_status,
35201 .get_brightness = adx_backlight_get_brightness,
35202 diff -urNp linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c
35203 --- linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35204 +++ linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35205 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35206 return pwm_channel_enable(&pwmbl->pwmc);
35207 }
35208
35209 -static struct backlight_ops atmel_pwm_bl_ops = {
35210 +static const struct backlight_ops atmel_pwm_bl_ops = {
35211 .get_brightness = atmel_pwm_bl_get_intensity,
35212 .update_status = atmel_pwm_bl_set_intensity,
35213 };
35214 diff -urNp linux-2.6.32.41/drivers/video/backlight/backlight.c linux-2.6.32.41/drivers/video/backlight/backlight.c
35215 --- linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35216 +++ linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35217 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35218 * ERR_PTR() or a pointer to the newly allocated device.
35219 */
35220 struct backlight_device *backlight_device_register(const char *name,
35221 - struct device *parent, void *devdata, struct backlight_ops *ops)
35222 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35223 {
35224 struct backlight_device *new_bd;
35225 int rc;
35226 diff -urNp linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c
35227 --- linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35228 +++ linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35229 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35230 }
35231 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35232
35233 -static struct backlight_ops corgi_bl_ops = {
35234 +static const struct backlight_ops corgi_bl_ops = {
35235 .get_brightness = corgi_bl_get_intensity,
35236 .update_status = corgi_bl_update_status,
35237 };
35238 diff -urNp linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c
35239 --- linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35240 +++ linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35241 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35242 return intensity;
35243 }
35244
35245 -static struct backlight_ops cr_backlight_ops = {
35246 +static const struct backlight_ops cr_backlight_ops = {
35247 .get_brightness = cr_backlight_get_intensity,
35248 .update_status = cr_backlight_set_intensity,
35249 };
35250 diff -urNp linux-2.6.32.41/drivers/video/backlight/da903x_bl.c linux-2.6.32.41/drivers/video/backlight/da903x_bl.c
35251 --- linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35252 +++ linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35253 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35254 return data->current_brightness;
35255 }
35256
35257 -static struct backlight_ops da903x_backlight_ops = {
35258 +static const struct backlight_ops da903x_backlight_ops = {
35259 .update_status = da903x_backlight_update_status,
35260 .get_brightness = da903x_backlight_get_brightness,
35261 };
35262 diff -urNp linux-2.6.32.41/drivers/video/backlight/generic_bl.c linux-2.6.32.41/drivers/video/backlight/generic_bl.c
35263 --- linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35264 +++ linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35265 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35266 }
35267 EXPORT_SYMBOL(corgibl_limit_intensity);
35268
35269 -static struct backlight_ops genericbl_ops = {
35270 +static const struct backlight_ops genericbl_ops = {
35271 .options = BL_CORE_SUSPENDRESUME,
35272 .get_brightness = genericbl_get_intensity,
35273 .update_status = genericbl_send_intensity,
35274 diff -urNp linux-2.6.32.41/drivers/video/backlight/hp680_bl.c linux-2.6.32.41/drivers/video/backlight/hp680_bl.c
35275 --- linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35276 +++ linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35277 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35278 return current_intensity;
35279 }
35280
35281 -static struct backlight_ops hp680bl_ops = {
35282 +static const struct backlight_ops hp680bl_ops = {
35283 .get_brightness = hp680bl_get_intensity,
35284 .update_status = hp680bl_set_intensity,
35285 };
35286 diff -urNp linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c
35287 --- linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35288 +++ linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35289 @@ -93,7 +93,7 @@ out:
35290 return ret;
35291 }
35292
35293 -static struct backlight_ops jornada_bl_ops = {
35294 +static const struct backlight_ops jornada_bl_ops = {
35295 .get_brightness = jornada_bl_get_brightness,
35296 .update_status = jornada_bl_update_status,
35297 .options = BL_CORE_SUSPENDRESUME,
35298 diff -urNp linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c
35299 --- linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35300 +++ linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35301 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35302 return kb3886bl_intensity;
35303 }
35304
35305 -static struct backlight_ops kb3886bl_ops = {
35306 +static const struct backlight_ops kb3886bl_ops = {
35307 .get_brightness = kb3886bl_get_intensity,
35308 .update_status = kb3886bl_send_intensity,
35309 };
35310 diff -urNp linux-2.6.32.41/drivers/video/backlight/locomolcd.c linux-2.6.32.41/drivers/video/backlight/locomolcd.c
35311 --- linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35312 +++ linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35313 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35314 return current_intensity;
35315 }
35316
35317 -static struct backlight_ops locomobl_data = {
35318 +static const struct backlight_ops locomobl_data = {
35319 .get_brightness = locomolcd_get_intensity,
35320 .update_status = locomolcd_set_intensity,
35321 };
35322 diff -urNp linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c
35323 --- linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35324 +++ linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35325 @@ -33,7 +33,7 @@ struct dmi_match_data {
35326 unsigned long iostart;
35327 unsigned long iolen;
35328 /* Backlight operations structure. */
35329 - struct backlight_ops backlight_ops;
35330 + const struct backlight_ops backlight_ops;
35331 };
35332
35333 /* Module parameters. */
35334 diff -urNp linux-2.6.32.41/drivers/video/backlight/omap1_bl.c linux-2.6.32.41/drivers/video/backlight/omap1_bl.c
35335 --- linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35336 +++ linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35337 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35338 return bl->current_intensity;
35339 }
35340
35341 -static struct backlight_ops omapbl_ops = {
35342 +static const struct backlight_ops omapbl_ops = {
35343 .get_brightness = omapbl_get_intensity,
35344 .update_status = omapbl_update_status,
35345 };
35346 diff -urNp linux-2.6.32.41/drivers/video/backlight/progear_bl.c linux-2.6.32.41/drivers/video/backlight/progear_bl.c
35347 --- linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35348 +++ linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35349 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35350 return intensity - HW_LEVEL_MIN;
35351 }
35352
35353 -static struct backlight_ops progearbl_ops = {
35354 +static const struct backlight_ops progearbl_ops = {
35355 .get_brightness = progearbl_get_intensity,
35356 .update_status = progearbl_set_intensity,
35357 };
35358 diff -urNp linux-2.6.32.41/drivers/video/backlight/pwm_bl.c linux-2.6.32.41/drivers/video/backlight/pwm_bl.c
35359 --- linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35360 +++ linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35361 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35362 return bl->props.brightness;
35363 }
35364
35365 -static struct backlight_ops pwm_backlight_ops = {
35366 +static const struct backlight_ops pwm_backlight_ops = {
35367 .update_status = pwm_backlight_update_status,
35368 .get_brightness = pwm_backlight_get_brightness,
35369 };
35370 diff -urNp linux-2.6.32.41/drivers/video/backlight/tosa_bl.c linux-2.6.32.41/drivers/video/backlight/tosa_bl.c
35371 --- linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35372 +++ linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35373 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35374 return props->brightness;
35375 }
35376
35377 -static struct backlight_ops bl_ops = {
35378 +static const struct backlight_ops bl_ops = {
35379 .get_brightness = tosa_bl_get_brightness,
35380 .update_status = tosa_bl_update_status,
35381 };
35382 diff -urNp linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c
35383 --- linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35384 +++ linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35385 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35386 return data->current_brightness;
35387 }
35388
35389 -static struct backlight_ops wm831x_backlight_ops = {
35390 +static const struct backlight_ops wm831x_backlight_ops = {
35391 .options = BL_CORE_SUSPENDRESUME,
35392 .update_status = wm831x_backlight_update_status,
35393 .get_brightness = wm831x_backlight_get_brightness,
35394 diff -urNp linux-2.6.32.41/drivers/video/bf54x-lq043fb.c linux-2.6.32.41/drivers/video/bf54x-lq043fb.c
35395 --- linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35396 +++ linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35397 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35398 return 0;
35399 }
35400
35401 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35402 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35403 .get_brightness = bl_get_brightness,
35404 };
35405
35406 diff -urNp linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c
35407 --- linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35408 +++ linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35409 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35410 return 0;
35411 }
35412
35413 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35414 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35415 .get_brightness = bl_get_brightness,
35416 };
35417
35418 diff -urNp linux-2.6.32.41/drivers/video/fbcmap.c linux-2.6.32.41/drivers/video/fbcmap.c
35419 --- linux-2.6.32.41/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35420 +++ linux-2.6.32.41/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35421 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35422 rc = -ENODEV;
35423 goto out;
35424 }
35425 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35426 - !info->fbops->fb_setcmap)) {
35427 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35428 rc = -EINVAL;
35429 goto out1;
35430 }
35431 diff -urNp linux-2.6.32.41/drivers/video/fbmem.c linux-2.6.32.41/drivers/video/fbmem.c
35432 --- linux-2.6.32.41/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35433 +++ linux-2.6.32.41/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35434 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35435 image->dx += image->width + 8;
35436 }
35437 } else if (rotate == FB_ROTATE_UD) {
35438 - for (x = 0; x < num && image->dx >= 0; x++) {
35439 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35440 info->fbops->fb_imageblit(info, image);
35441 image->dx -= image->width + 8;
35442 }
35443 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35444 image->dy += image->height + 8;
35445 }
35446 } else if (rotate == FB_ROTATE_CCW) {
35447 - for (x = 0; x < num && image->dy >= 0; x++) {
35448 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35449 info->fbops->fb_imageblit(info, image);
35450 image->dy -= image->height + 8;
35451 }
35452 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35453 int flags = info->flags;
35454 int ret = 0;
35455
35456 + pax_track_stack();
35457 +
35458 if (var->activate & FB_ACTIVATE_INV_MODE) {
35459 struct fb_videomode mode1, mode2;
35460
35461 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35462 void __user *argp = (void __user *)arg;
35463 long ret = 0;
35464
35465 + pax_track_stack();
35466 +
35467 switch (cmd) {
35468 case FBIOGET_VSCREENINFO:
35469 if (!lock_fb_info(info))
35470 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35471 return -EFAULT;
35472 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35473 return -EINVAL;
35474 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35475 + if (con2fb.framebuffer >= FB_MAX)
35476 return -EINVAL;
35477 if (!registered_fb[con2fb.framebuffer])
35478 request_module("fb%d", con2fb.framebuffer);
35479 diff -urNp linux-2.6.32.41/drivers/video/i810/i810_accel.c linux-2.6.32.41/drivers/video/i810/i810_accel.c
35480 --- linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35481 +++ linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35482 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35483 }
35484 }
35485 printk("ringbuffer lockup!!!\n");
35486 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35487 i810_report_error(mmio);
35488 par->dev_flags |= LOCKUP;
35489 info->pixmap.scan_align = 1;
35490 diff -urNp linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c
35491 --- linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35492 +++ linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35493 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35494 return bd->props.brightness;
35495 }
35496
35497 -static struct backlight_ops nvidia_bl_ops = {
35498 +static const struct backlight_ops nvidia_bl_ops = {
35499 .get_brightness = nvidia_bl_get_brightness,
35500 .update_status = nvidia_bl_update_status,
35501 };
35502 diff -urNp linux-2.6.32.41/drivers/video/riva/fbdev.c linux-2.6.32.41/drivers/video/riva/fbdev.c
35503 --- linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35504 +++ linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35505 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35506 return bd->props.brightness;
35507 }
35508
35509 -static struct backlight_ops riva_bl_ops = {
35510 +static const struct backlight_ops riva_bl_ops = {
35511 .get_brightness = riva_bl_get_brightness,
35512 .update_status = riva_bl_update_status,
35513 };
35514 diff -urNp linux-2.6.32.41/drivers/video/uvesafb.c linux-2.6.32.41/drivers/video/uvesafb.c
35515 --- linux-2.6.32.41/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35516 +++ linux-2.6.32.41/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35517 @@ -18,6 +18,7 @@
35518 #include <linux/fb.h>
35519 #include <linux/io.h>
35520 #include <linux/mutex.h>
35521 +#include <linux/moduleloader.h>
35522 #include <video/edid.h>
35523 #include <video/uvesafb.h>
35524 #ifdef CONFIG_X86
35525 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35526 NULL,
35527 };
35528
35529 - return call_usermodehelper(v86d_path, argv, envp, 1);
35530 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35531 }
35532
35533 /*
35534 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35535 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35536 par->pmi_setpal = par->ypan = 0;
35537 } else {
35538 +
35539 +#ifdef CONFIG_PAX_KERNEXEC
35540 +#ifdef CONFIG_MODULES
35541 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35542 +#endif
35543 + if (!par->pmi_code) {
35544 + par->pmi_setpal = par->ypan = 0;
35545 + return 0;
35546 + }
35547 +#endif
35548 +
35549 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35550 + task->t.regs.edi);
35551 +
35552 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35553 + pax_open_kernel();
35554 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35555 + pax_close_kernel();
35556 +
35557 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35558 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35559 +#else
35560 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35561 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35562 +#endif
35563 +
35564 printk(KERN_INFO "uvesafb: protected mode interface info at "
35565 "%04x:%04x\n",
35566 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35567 @@ -1799,6 +1822,11 @@ out:
35568 if (par->vbe_modes)
35569 kfree(par->vbe_modes);
35570
35571 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35572 + if (par->pmi_code)
35573 + module_free_exec(NULL, par->pmi_code);
35574 +#endif
35575 +
35576 framebuffer_release(info);
35577 return err;
35578 }
35579 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35580 kfree(par->vbe_state_orig);
35581 if (par->vbe_state_saved)
35582 kfree(par->vbe_state_saved);
35583 +
35584 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35585 + if (par->pmi_code)
35586 + module_free_exec(NULL, par->pmi_code);
35587 +#endif
35588 +
35589 }
35590
35591 framebuffer_release(info);
35592 diff -urNp linux-2.6.32.41/drivers/video/vesafb.c linux-2.6.32.41/drivers/video/vesafb.c
35593 --- linux-2.6.32.41/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35594 +++ linux-2.6.32.41/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35595 @@ -9,6 +9,7 @@
35596 */
35597
35598 #include <linux/module.h>
35599 +#include <linux/moduleloader.h>
35600 #include <linux/kernel.h>
35601 #include <linux/errno.h>
35602 #include <linux/string.h>
35603 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35604 static int vram_total __initdata; /* Set total amount of memory */
35605 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35606 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35607 -static void (*pmi_start)(void) __read_mostly;
35608 -static void (*pmi_pal) (void) __read_mostly;
35609 +static void (*pmi_start)(void) __read_only;
35610 +static void (*pmi_pal) (void) __read_only;
35611 static int depth __read_mostly;
35612 static int vga_compat __read_mostly;
35613 /* --------------------------------------------------------------------- */
35614 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35615 unsigned int size_vmode;
35616 unsigned int size_remap;
35617 unsigned int size_total;
35618 + void *pmi_code = NULL;
35619
35620 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35621 return -ENODEV;
35622 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35623 size_remap = size_total;
35624 vesafb_fix.smem_len = size_remap;
35625
35626 -#ifndef __i386__
35627 - screen_info.vesapm_seg = 0;
35628 -#endif
35629 -
35630 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35631 printk(KERN_WARNING
35632 "vesafb: cannot reserve video memory at 0x%lx\n",
35633 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35634 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35635 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35636
35637 +#ifdef __i386__
35638 +
35639 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35640 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35641 + if (!pmi_code)
35642 +#elif !defined(CONFIG_PAX_KERNEXEC)
35643 + if (0)
35644 +#endif
35645 +
35646 +#endif
35647 + screen_info.vesapm_seg = 0;
35648 +
35649 if (screen_info.vesapm_seg) {
35650 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35651 - screen_info.vesapm_seg,screen_info.vesapm_off);
35652 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35653 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35654 }
35655
35656 if (screen_info.vesapm_seg < 0xc000)
35657 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35658
35659 if (ypan || pmi_setpal) {
35660 unsigned short *pmi_base;
35661 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35662 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35663 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35664 +
35665 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35666 +
35667 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35668 + pax_open_kernel();
35669 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35670 +#else
35671 + pmi_code = pmi_base;
35672 +#endif
35673 +
35674 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35675 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35676 +
35677 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35678 + pmi_start = ktva_ktla(pmi_start);
35679 + pmi_pal = ktva_ktla(pmi_pal);
35680 + pax_close_kernel();
35681 +#endif
35682 +
35683 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35684 if (pmi_base[3]) {
35685 printk(KERN_INFO "vesafb: pmi: ports = ");
35686 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35687 info->node, info->fix.id);
35688 return 0;
35689 err:
35690 +
35691 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35692 + module_free_exec(NULL, pmi_code);
35693 +#endif
35694 +
35695 if (info->screen_base)
35696 iounmap(info->screen_base);
35697 framebuffer_release(info);
35698 diff -urNp linux-2.6.32.41/drivers/xen/sys-hypervisor.c linux-2.6.32.41/drivers/xen/sys-hypervisor.c
35699 --- linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35700 +++ linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35701 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35702 return 0;
35703 }
35704
35705 -static struct sysfs_ops hyp_sysfs_ops = {
35706 +static const struct sysfs_ops hyp_sysfs_ops = {
35707 .show = hyp_sysfs_show,
35708 .store = hyp_sysfs_store,
35709 };
35710 diff -urNp linux-2.6.32.41/fs/9p/vfs_inode.c linux-2.6.32.41/fs/9p/vfs_inode.c
35711 --- linux-2.6.32.41/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35712 +++ linux-2.6.32.41/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35713 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35714 static void
35715 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35716 {
35717 - char *s = nd_get_link(nd);
35718 + const char *s = nd_get_link(nd);
35719
35720 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35721 IS_ERR(s) ? "<error>" : s);
35722 diff -urNp linux-2.6.32.41/fs/aio.c linux-2.6.32.41/fs/aio.c
35723 --- linux-2.6.32.41/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35724 +++ linux-2.6.32.41/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
35725 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35726 size += sizeof(struct io_event) * nr_events;
35727 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35728
35729 - if (nr_pages < 0)
35730 + if (nr_pages <= 0)
35731 return -EINVAL;
35732
35733 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35734 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35735 struct aio_timeout to;
35736 int retry = 0;
35737
35738 + pax_track_stack();
35739 +
35740 /* needed to zero any padding within an entry (there shouldn't be
35741 * any, but C is fun!
35742 */
35743 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
35744 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
35745 {
35746 ssize_t ret;
35747 + struct iovec iovstack;
35748
35749 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
35750 kiocb->ki_nbytes, 1,
35751 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
35752 + &iovstack, &kiocb->ki_iovec);
35753 if (ret < 0)
35754 goto out;
35755
35756 + if (kiocb->ki_iovec == &iovstack) {
35757 + kiocb->ki_inline_vec = iovstack;
35758 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
35759 + }
35760 kiocb->ki_nr_segs = kiocb->ki_nbytes;
35761 kiocb->ki_cur_seg = 0;
35762 /* ki_nbytes/left now reflect bytes instead of segs */
35763 diff -urNp linux-2.6.32.41/fs/attr.c linux-2.6.32.41/fs/attr.c
35764 --- linux-2.6.32.41/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35765 +++ linux-2.6.32.41/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35766 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35767 unsigned long limit;
35768
35769 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35770 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35771 if (limit != RLIM_INFINITY && offset > limit)
35772 goto out_sig;
35773 if (offset > inode->i_sb->s_maxbytes)
35774 diff -urNp linux-2.6.32.41/fs/autofs/root.c linux-2.6.32.41/fs/autofs/root.c
35775 --- linux-2.6.32.41/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35776 +++ linux-2.6.32.41/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35777 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35778 set_bit(n,sbi->symlink_bitmap);
35779 sl = &sbi->symlink[n];
35780 sl->len = strlen(symname);
35781 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35782 + slsize = sl->len+1;
35783 + sl->data = kmalloc(slsize, GFP_KERNEL);
35784 if (!sl->data) {
35785 clear_bit(n,sbi->symlink_bitmap);
35786 unlock_kernel();
35787 diff -urNp linux-2.6.32.41/fs/autofs4/symlink.c linux-2.6.32.41/fs/autofs4/symlink.c
35788 --- linux-2.6.32.41/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35789 +++ linux-2.6.32.41/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35790 @@ -15,7 +15,7 @@
35791 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35792 {
35793 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35794 - nd_set_link(nd, (char *)ino->u.symlink);
35795 + nd_set_link(nd, ino->u.symlink);
35796 return NULL;
35797 }
35798
35799 diff -urNp linux-2.6.32.41/fs/befs/linuxvfs.c linux-2.6.32.41/fs/befs/linuxvfs.c
35800 --- linux-2.6.32.41/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35801 +++ linux-2.6.32.41/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35802 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35803 {
35804 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35805 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35806 - char *link = nd_get_link(nd);
35807 + const char *link = nd_get_link(nd);
35808 if (!IS_ERR(link))
35809 kfree(link);
35810 }
35811 diff -urNp linux-2.6.32.41/fs/binfmt_aout.c linux-2.6.32.41/fs/binfmt_aout.c
35812 --- linux-2.6.32.41/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35813 +++ linux-2.6.32.41/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35814 @@ -16,6 +16,7 @@
35815 #include <linux/string.h>
35816 #include <linux/fs.h>
35817 #include <linux/file.h>
35818 +#include <linux/security.h>
35819 #include <linux/stat.h>
35820 #include <linux/fcntl.h>
35821 #include <linux/ptrace.h>
35822 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35823 #endif
35824 # define START_STACK(u) (u.start_stack)
35825
35826 + memset(&dump, 0, sizeof(dump));
35827 +
35828 fs = get_fs();
35829 set_fs(KERNEL_DS);
35830 has_dumped = 1;
35831 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35832
35833 /* If the size of the dump file exceeds the rlimit, then see what would happen
35834 if we wrote the stack, but not the data area. */
35835 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
35836 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
35837 dump.u_dsize = 0;
35838
35839 /* Make sure we have enough room to write the stack and data areas. */
35840 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
35841 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
35842 dump.u_ssize = 0;
35843
35844 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
35845 dump_size = dump.u_ssize << PAGE_SHIFT;
35846 DUMP_WRITE(dump_start,dump_size);
35847 }
35848 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
35849 - set_fs(KERNEL_DS);
35850 - DUMP_WRITE(current,sizeof(*current));
35851 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
35852 end_coredump:
35853 set_fs(fs);
35854 return has_dumped;
35855 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
35856 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
35857 if (rlim >= RLIM_INFINITY)
35858 rlim = ~0;
35859 +
35860 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
35861 if (ex.a_data + ex.a_bss > rlim)
35862 return -ENOMEM;
35863
35864 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
35865 install_exec_creds(bprm);
35866 current->flags &= ~PF_FORKNOEXEC;
35867
35868 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
35869 + current->mm->pax_flags = 0UL;
35870 +#endif
35871 +
35872 +#ifdef CONFIG_PAX_PAGEEXEC
35873 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
35874 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
35875 +
35876 +#ifdef CONFIG_PAX_EMUTRAMP
35877 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
35878 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
35879 +#endif
35880 +
35881 +#ifdef CONFIG_PAX_MPROTECT
35882 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
35883 + current->mm->pax_flags |= MF_PAX_MPROTECT;
35884 +#endif
35885 +
35886 + }
35887 +#endif
35888 +
35889 if (N_MAGIC(ex) == OMAGIC) {
35890 unsigned long text_addr, map_size;
35891 loff_t pos;
35892 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
35893
35894 down_write(&current->mm->mmap_sem);
35895 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
35896 - PROT_READ | PROT_WRITE | PROT_EXEC,
35897 + PROT_READ | PROT_WRITE,
35898 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
35899 fd_offset + ex.a_text);
35900 up_write(&current->mm->mmap_sem);
35901 diff -urNp linux-2.6.32.41/fs/binfmt_elf.c linux-2.6.32.41/fs/binfmt_elf.c
35902 --- linux-2.6.32.41/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
35903 +++ linux-2.6.32.41/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
35904 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
35905 #define elf_core_dump NULL
35906 #endif
35907
35908 +#ifdef CONFIG_PAX_MPROTECT
35909 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
35910 +#endif
35911 +
35912 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
35913 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
35914 #else
35915 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
35916 .load_binary = load_elf_binary,
35917 .load_shlib = load_elf_library,
35918 .core_dump = elf_core_dump,
35919 +
35920 +#ifdef CONFIG_PAX_MPROTECT
35921 + .handle_mprotect= elf_handle_mprotect,
35922 +#endif
35923 +
35924 .min_coredump = ELF_EXEC_PAGESIZE,
35925 .hasvdso = 1
35926 };
35927 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
35928
35929 static int set_brk(unsigned long start, unsigned long end)
35930 {
35931 + unsigned long e = end;
35932 +
35933 start = ELF_PAGEALIGN(start);
35934 end = ELF_PAGEALIGN(end);
35935 if (end > start) {
35936 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
35937 if (BAD_ADDR(addr))
35938 return addr;
35939 }
35940 - current->mm->start_brk = current->mm->brk = end;
35941 + current->mm->start_brk = current->mm->brk = e;
35942 return 0;
35943 }
35944
35945 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
35946 elf_addr_t __user *u_rand_bytes;
35947 const char *k_platform = ELF_PLATFORM;
35948 const char *k_base_platform = ELF_BASE_PLATFORM;
35949 - unsigned char k_rand_bytes[16];
35950 + u32 k_rand_bytes[4];
35951 int items;
35952 elf_addr_t *elf_info;
35953 int ei_index = 0;
35954 const struct cred *cred = current_cred();
35955 struct vm_area_struct *vma;
35956 + unsigned long saved_auxv[AT_VECTOR_SIZE];
35957 +
35958 + pax_track_stack();
35959
35960 /*
35961 * In some cases (e.g. Hyper-Threading), we want to avoid L1
35962 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
35963 * Generate 16 random bytes for userspace PRNG seeding.
35964 */
35965 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
35966 - u_rand_bytes = (elf_addr_t __user *)
35967 - STACK_ALLOC(p, sizeof(k_rand_bytes));
35968 + srandom32(k_rand_bytes[0] ^ random32());
35969 + srandom32(k_rand_bytes[1] ^ random32());
35970 + srandom32(k_rand_bytes[2] ^ random32());
35971 + srandom32(k_rand_bytes[3] ^ random32());
35972 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
35973 + u_rand_bytes = (elf_addr_t __user *) p;
35974 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
35975 return -EFAULT;
35976
35977 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
35978 return -EFAULT;
35979 current->mm->env_end = p;
35980
35981 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
35982 +
35983 /* Put the elf_info on the stack in the right place. */
35984 sp = (elf_addr_t __user *)envp + 1;
35985 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
35986 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
35987 return -EFAULT;
35988 return 0;
35989 }
35990 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
35991 {
35992 struct elf_phdr *elf_phdata;
35993 struct elf_phdr *eppnt;
35994 - unsigned long load_addr = 0;
35995 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
35996 int load_addr_set = 0;
35997 unsigned long last_bss = 0, elf_bss = 0;
35998 - unsigned long error = ~0UL;
35999 + unsigned long error = -EINVAL;
36000 unsigned long total_size;
36001 int retval, i, size;
36002
36003 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36004 goto out_close;
36005 }
36006
36007 +#ifdef CONFIG_PAX_SEGMEXEC
36008 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36009 + pax_task_size = SEGMEXEC_TASK_SIZE;
36010 +#endif
36011 +
36012 eppnt = elf_phdata;
36013 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36014 if (eppnt->p_type == PT_LOAD) {
36015 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36016 k = load_addr + eppnt->p_vaddr;
36017 if (BAD_ADDR(k) ||
36018 eppnt->p_filesz > eppnt->p_memsz ||
36019 - eppnt->p_memsz > TASK_SIZE ||
36020 - TASK_SIZE - eppnt->p_memsz < k) {
36021 + eppnt->p_memsz > pax_task_size ||
36022 + pax_task_size - eppnt->p_memsz < k) {
36023 error = -ENOMEM;
36024 goto out_close;
36025 }
36026 @@ -532,6 +557,194 @@ out:
36027 return error;
36028 }
36029
36030 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36031 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36032 +{
36033 + unsigned long pax_flags = 0UL;
36034 +
36035 +#ifdef CONFIG_PAX_PAGEEXEC
36036 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36037 + pax_flags |= MF_PAX_PAGEEXEC;
36038 +#endif
36039 +
36040 +#ifdef CONFIG_PAX_SEGMEXEC
36041 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36042 + pax_flags |= MF_PAX_SEGMEXEC;
36043 +#endif
36044 +
36045 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36046 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36047 + if (nx_enabled)
36048 + pax_flags &= ~MF_PAX_SEGMEXEC;
36049 + else
36050 + pax_flags &= ~MF_PAX_PAGEEXEC;
36051 + }
36052 +#endif
36053 +
36054 +#ifdef CONFIG_PAX_EMUTRAMP
36055 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36056 + pax_flags |= MF_PAX_EMUTRAMP;
36057 +#endif
36058 +
36059 +#ifdef CONFIG_PAX_MPROTECT
36060 + if (elf_phdata->p_flags & PF_MPROTECT)
36061 + pax_flags |= MF_PAX_MPROTECT;
36062 +#endif
36063 +
36064 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36065 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36066 + pax_flags |= MF_PAX_RANDMMAP;
36067 +#endif
36068 +
36069 + return pax_flags;
36070 +}
36071 +#endif
36072 +
36073 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36074 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36075 +{
36076 + unsigned long pax_flags = 0UL;
36077 +
36078 +#ifdef CONFIG_PAX_PAGEEXEC
36079 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36080 + pax_flags |= MF_PAX_PAGEEXEC;
36081 +#endif
36082 +
36083 +#ifdef CONFIG_PAX_SEGMEXEC
36084 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36085 + pax_flags |= MF_PAX_SEGMEXEC;
36086 +#endif
36087 +
36088 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36089 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36090 + if (nx_enabled)
36091 + pax_flags &= ~MF_PAX_SEGMEXEC;
36092 + else
36093 + pax_flags &= ~MF_PAX_PAGEEXEC;
36094 + }
36095 +#endif
36096 +
36097 +#ifdef CONFIG_PAX_EMUTRAMP
36098 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36099 + pax_flags |= MF_PAX_EMUTRAMP;
36100 +#endif
36101 +
36102 +#ifdef CONFIG_PAX_MPROTECT
36103 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36104 + pax_flags |= MF_PAX_MPROTECT;
36105 +#endif
36106 +
36107 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36108 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36109 + pax_flags |= MF_PAX_RANDMMAP;
36110 +#endif
36111 +
36112 + return pax_flags;
36113 +}
36114 +#endif
36115 +
36116 +#ifdef CONFIG_PAX_EI_PAX
36117 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36118 +{
36119 + unsigned long pax_flags = 0UL;
36120 +
36121 +#ifdef CONFIG_PAX_PAGEEXEC
36122 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36123 + pax_flags |= MF_PAX_PAGEEXEC;
36124 +#endif
36125 +
36126 +#ifdef CONFIG_PAX_SEGMEXEC
36127 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36128 + pax_flags |= MF_PAX_SEGMEXEC;
36129 +#endif
36130 +
36131 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36132 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36133 + if (nx_enabled)
36134 + pax_flags &= ~MF_PAX_SEGMEXEC;
36135 + else
36136 + pax_flags &= ~MF_PAX_PAGEEXEC;
36137 + }
36138 +#endif
36139 +
36140 +#ifdef CONFIG_PAX_EMUTRAMP
36141 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36142 + pax_flags |= MF_PAX_EMUTRAMP;
36143 +#endif
36144 +
36145 +#ifdef CONFIG_PAX_MPROTECT
36146 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36147 + pax_flags |= MF_PAX_MPROTECT;
36148 +#endif
36149 +
36150 +#ifdef CONFIG_PAX_ASLR
36151 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36152 + pax_flags |= MF_PAX_RANDMMAP;
36153 +#endif
36154 +
36155 + return pax_flags;
36156 +}
36157 +#endif
36158 +
36159 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36160 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36161 +{
36162 + unsigned long pax_flags = 0UL;
36163 +
36164 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36165 + unsigned long i;
36166 + int found_flags = 0;
36167 +#endif
36168 +
36169 +#ifdef CONFIG_PAX_EI_PAX
36170 + pax_flags = pax_parse_ei_pax(elf_ex);
36171 +#endif
36172 +
36173 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36174 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36175 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36176 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36177 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36178 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36179 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36180 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36181 + return -EINVAL;
36182 +
36183 +#ifdef CONFIG_PAX_SOFTMODE
36184 + if (pax_softmode)
36185 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36186 + else
36187 +#endif
36188 +
36189 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36190 + found_flags = 1;
36191 + break;
36192 + }
36193 +#endif
36194 +
36195 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36196 + if (found_flags == 0) {
36197 + struct elf_phdr phdr;
36198 + memset(&phdr, 0, sizeof(phdr));
36199 + phdr.p_flags = PF_NOEMUTRAMP;
36200 +#ifdef CONFIG_PAX_SOFTMODE
36201 + if (pax_softmode)
36202 + pax_flags = pax_parse_softmode(&phdr);
36203 + else
36204 +#endif
36205 + pax_flags = pax_parse_hardmode(&phdr);
36206 + }
36207 +#endif
36208 +
36209 +
36210 + if (0 > pax_check_flags(&pax_flags))
36211 + return -EINVAL;
36212 +
36213 + current->mm->pax_flags = pax_flags;
36214 + return 0;
36215 +}
36216 +#endif
36217 +
36218 /*
36219 * These are the functions used to load ELF style executables and shared
36220 * libraries. There is no binary dependent code anywhere else.
36221 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36222 {
36223 unsigned int random_variable = 0;
36224
36225 +#ifdef CONFIG_PAX_RANDUSTACK
36226 + if (randomize_va_space)
36227 + return stack_top - current->mm->delta_stack;
36228 +#endif
36229 +
36230 if ((current->flags & PF_RANDOMIZE) &&
36231 !(current->personality & ADDR_NO_RANDOMIZE)) {
36232 random_variable = get_random_int() & STACK_RND_MASK;
36233 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36234 unsigned long load_addr = 0, load_bias = 0;
36235 int load_addr_set = 0;
36236 char * elf_interpreter = NULL;
36237 - unsigned long error;
36238 + unsigned long error = 0;
36239 struct elf_phdr *elf_ppnt, *elf_phdata;
36240 unsigned long elf_bss, elf_brk;
36241 int retval, i;
36242 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36243 unsigned long start_code, end_code, start_data, end_data;
36244 unsigned long reloc_func_desc = 0;
36245 int executable_stack = EXSTACK_DEFAULT;
36246 - unsigned long def_flags = 0;
36247 struct {
36248 struct elfhdr elf_ex;
36249 struct elfhdr interp_elf_ex;
36250 } *loc;
36251 + unsigned long pax_task_size = TASK_SIZE;
36252
36253 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36254 if (!loc) {
36255 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36256
36257 /* OK, This is the point of no return */
36258 current->flags &= ~PF_FORKNOEXEC;
36259 - current->mm->def_flags = def_flags;
36260 +
36261 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36262 + current->mm->pax_flags = 0UL;
36263 +#endif
36264 +
36265 +#ifdef CONFIG_PAX_DLRESOLVE
36266 + current->mm->call_dl_resolve = 0UL;
36267 +#endif
36268 +
36269 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36270 + current->mm->call_syscall = 0UL;
36271 +#endif
36272 +
36273 +#ifdef CONFIG_PAX_ASLR
36274 + current->mm->delta_mmap = 0UL;
36275 + current->mm->delta_stack = 0UL;
36276 +#endif
36277 +
36278 + current->mm->def_flags = 0;
36279 +
36280 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36281 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36282 + send_sig(SIGKILL, current, 0);
36283 + goto out_free_dentry;
36284 + }
36285 +#endif
36286 +
36287 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36288 + pax_set_initial_flags(bprm);
36289 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36290 + if (pax_set_initial_flags_func)
36291 + (pax_set_initial_flags_func)(bprm);
36292 +#endif
36293 +
36294 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36295 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36296 + current->mm->context.user_cs_limit = PAGE_SIZE;
36297 + current->mm->def_flags |= VM_PAGEEXEC;
36298 + }
36299 +#endif
36300 +
36301 +#ifdef CONFIG_PAX_SEGMEXEC
36302 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36303 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36304 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36305 + pax_task_size = SEGMEXEC_TASK_SIZE;
36306 + }
36307 +#endif
36308 +
36309 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36310 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36311 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36312 + put_cpu();
36313 + }
36314 +#endif
36315
36316 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36317 may depend on the personality. */
36318 SET_PERSONALITY(loc->elf_ex);
36319 +
36320 +#ifdef CONFIG_PAX_ASLR
36321 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36322 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36323 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36324 + }
36325 +#endif
36326 +
36327 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36328 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36329 + executable_stack = EXSTACK_DISABLE_X;
36330 + current->personality &= ~READ_IMPLIES_EXEC;
36331 + } else
36332 +#endif
36333 +
36334 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36335 current->personality |= READ_IMPLIES_EXEC;
36336
36337 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36338 #else
36339 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36340 #endif
36341 +
36342 +#ifdef CONFIG_PAX_RANDMMAP
36343 + /* PaX: randomize base address at the default exe base if requested */
36344 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36345 +#ifdef CONFIG_SPARC64
36346 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36347 +#else
36348 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36349 +#endif
36350 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36351 + elf_flags |= MAP_FIXED;
36352 + }
36353 +#endif
36354 +
36355 }
36356
36357 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36358 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36359 * allowed task size. Note that p_filesz must always be
36360 * <= p_memsz so it is only necessary to check p_memsz.
36361 */
36362 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36363 - elf_ppnt->p_memsz > TASK_SIZE ||
36364 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36365 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36366 + elf_ppnt->p_memsz > pax_task_size ||
36367 + pax_task_size - elf_ppnt->p_memsz < k) {
36368 /* set_brk can never work. Avoid overflows. */
36369 send_sig(SIGKILL, current, 0);
36370 retval = -EINVAL;
36371 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36372 start_data += load_bias;
36373 end_data += load_bias;
36374
36375 +#ifdef CONFIG_PAX_RANDMMAP
36376 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36377 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36378 +#endif
36379 +
36380 /* Calling set_brk effectively mmaps the pages that we need
36381 * for the bss and break sections. We must do this before
36382 * mapping in the interpreter, to make sure it doesn't wind
36383 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36384 goto out_free_dentry;
36385 }
36386 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36387 - send_sig(SIGSEGV, current, 0);
36388 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36389 - goto out_free_dentry;
36390 + /*
36391 + * This bss-zeroing can fail if the ELF
36392 + * file specifies odd protections. So
36393 + * we don't check the return value
36394 + */
36395 }
36396
36397 if (elf_interpreter) {
36398 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36399 unsigned long n = off;
36400 if (n > PAGE_SIZE)
36401 n = PAGE_SIZE;
36402 - if (!dump_write(file, buf, n))
36403 + if (!dump_write(file, buf, n)) {
36404 + free_page((unsigned long)buf);
36405 return 0;
36406 + }
36407 off -= n;
36408 }
36409 free_page((unsigned long)buf);
36410 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36411 * Decide what to dump of a segment, part, all or none.
36412 */
36413 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36414 - unsigned long mm_flags)
36415 + unsigned long mm_flags, long signr)
36416 {
36417 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36418
36419 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36420 if (vma->vm_file == NULL)
36421 return 0;
36422
36423 - if (FILTER(MAPPED_PRIVATE))
36424 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36425 goto whole;
36426
36427 /*
36428 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36429 #undef DUMP_WRITE
36430
36431 #define DUMP_WRITE(addr, nr) \
36432 + do { \
36433 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36434 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36435 - goto end_coredump;
36436 + goto end_coredump; \
36437 + } while (0);
36438
36439 static void fill_elf_header(struct elfhdr *elf, int segs,
36440 u16 machine, u32 flags, u8 osabi)
36441 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36442 {
36443 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36444 int i = 0;
36445 - do
36446 + do {
36447 i += 2;
36448 - while (auxv[i - 2] != AT_NULL);
36449 + } while (auxv[i - 2] != AT_NULL);
36450 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36451 }
36452
36453 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36454 phdr.p_offset = offset;
36455 phdr.p_vaddr = vma->vm_start;
36456 phdr.p_paddr = 0;
36457 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36458 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36459 phdr.p_memsz = vma->vm_end - vma->vm_start;
36460 offset += phdr.p_filesz;
36461 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36462 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36463 unsigned long addr;
36464 unsigned long end;
36465
36466 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36467 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36468
36469 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36470 struct page *page;
36471 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36472 page = get_dump_page(addr);
36473 if (page) {
36474 void *kaddr = kmap(page);
36475 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36476 stop = ((size += PAGE_SIZE) > limit) ||
36477 !dump_write(file, kaddr, PAGE_SIZE);
36478 kunmap(page);
36479 @@ -2042,6 +2356,97 @@ out:
36480
36481 #endif /* USE_ELF_CORE_DUMP */
36482
36483 +#ifdef CONFIG_PAX_MPROTECT
36484 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36485 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36486 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36487 + *
36488 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36489 + * basis because we want to allow the common case and not the special ones.
36490 + */
36491 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36492 +{
36493 + struct elfhdr elf_h;
36494 + struct elf_phdr elf_p;
36495 + unsigned long i;
36496 + unsigned long oldflags;
36497 + bool is_textrel_rw, is_textrel_rx, is_relro;
36498 +
36499 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36500 + return;
36501 +
36502 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36503 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36504 +
36505 +#ifdef CONFIG_PAX_ELFRELOCS
36506 + /* possible TEXTREL */
36507 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36508 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36509 +#else
36510 + is_textrel_rw = false;
36511 + is_textrel_rx = false;
36512 +#endif
36513 +
36514 + /* possible RELRO */
36515 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36516 +
36517 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36518 + return;
36519 +
36520 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36521 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36522 +
36523 +#ifdef CONFIG_PAX_ETEXECRELOCS
36524 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36525 +#else
36526 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36527 +#endif
36528 +
36529 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36530 + !elf_check_arch(&elf_h) ||
36531 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36532 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36533 + return;
36534 +
36535 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36536 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36537 + return;
36538 + switch (elf_p.p_type) {
36539 + case PT_DYNAMIC:
36540 + if (!is_textrel_rw && !is_textrel_rx)
36541 + continue;
36542 + i = 0UL;
36543 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36544 + elf_dyn dyn;
36545 +
36546 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36547 + return;
36548 + if (dyn.d_tag == DT_NULL)
36549 + return;
36550 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36551 + gr_log_textrel(vma);
36552 + if (is_textrel_rw)
36553 + vma->vm_flags |= VM_MAYWRITE;
36554 + else
36555 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36556 + vma->vm_flags &= ~VM_MAYWRITE;
36557 + return;
36558 + }
36559 + i++;
36560 + }
36561 + return;
36562 +
36563 + case PT_GNU_RELRO:
36564 + if (!is_relro)
36565 + continue;
36566 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36567 + vma->vm_flags &= ~VM_MAYWRITE;
36568 + return;
36569 + }
36570 + }
36571 +}
36572 +#endif
36573 +
36574 static int __init init_elf_binfmt(void)
36575 {
36576 return register_binfmt(&elf_format);
36577 diff -urNp linux-2.6.32.41/fs/binfmt_flat.c linux-2.6.32.41/fs/binfmt_flat.c
36578 --- linux-2.6.32.41/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36579 +++ linux-2.6.32.41/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36580 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36581 realdatastart = (unsigned long) -ENOMEM;
36582 printk("Unable to allocate RAM for process data, errno %d\n",
36583 (int)-realdatastart);
36584 + down_write(&current->mm->mmap_sem);
36585 do_munmap(current->mm, textpos, text_len);
36586 + up_write(&current->mm->mmap_sem);
36587 ret = realdatastart;
36588 goto err;
36589 }
36590 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36591 }
36592 if (IS_ERR_VALUE(result)) {
36593 printk("Unable to read data+bss, errno %d\n", (int)-result);
36594 + down_write(&current->mm->mmap_sem);
36595 do_munmap(current->mm, textpos, text_len);
36596 do_munmap(current->mm, realdatastart, data_len + extra);
36597 + up_write(&current->mm->mmap_sem);
36598 ret = result;
36599 goto err;
36600 }
36601 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36602 }
36603 if (IS_ERR_VALUE(result)) {
36604 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36605 + down_write(&current->mm->mmap_sem);
36606 do_munmap(current->mm, textpos, text_len + data_len + extra +
36607 MAX_SHARED_LIBS * sizeof(unsigned long));
36608 + up_write(&current->mm->mmap_sem);
36609 ret = result;
36610 goto err;
36611 }
36612 diff -urNp linux-2.6.32.41/fs/bio.c linux-2.6.32.41/fs/bio.c
36613 --- linux-2.6.32.41/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36614 +++ linux-2.6.32.41/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36615 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36616
36617 i = 0;
36618 while (i < bio_slab_nr) {
36619 - struct bio_slab *bslab = &bio_slabs[i];
36620 + bslab = &bio_slabs[i];
36621
36622 if (!bslab->slab && entry == -1)
36623 entry = i;
36624 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36625 const int read = bio_data_dir(bio) == READ;
36626 struct bio_map_data *bmd = bio->bi_private;
36627 int i;
36628 - char *p = bmd->sgvecs[0].iov_base;
36629 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36630
36631 __bio_for_each_segment(bvec, bio, i, 0) {
36632 char *addr = page_address(bvec->bv_page);
36633 diff -urNp linux-2.6.32.41/fs/block_dev.c linux-2.6.32.41/fs/block_dev.c
36634 --- linux-2.6.32.41/fs/block_dev.c 2011-03-27 14:31:47.000000000 -0400
36635 +++ linux-2.6.32.41/fs/block_dev.c 2011-04-17 15:56:46.000000000 -0400
36636 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36637 else if (bdev->bd_contains == bdev)
36638 res = 0; /* is a whole device which isn't held */
36639
36640 - else if (bdev->bd_contains->bd_holder == bd_claim)
36641 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36642 res = 0; /* is a partition of a device that is being partitioned */
36643 else if (bdev->bd_contains->bd_holder != NULL)
36644 res = -EBUSY; /* is a partition of a held device */
36645 diff -urNp linux-2.6.32.41/fs/btrfs/ctree.c linux-2.6.32.41/fs/btrfs/ctree.c
36646 --- linux-2.6.32.41/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36647 +++ linux-2.6.32.41/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36648 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36649 free_extent_buffer(buf);
36650 add_root_to_dirty_list(root);
36651 } else {
36652 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36653 - parent_start = parent->start;
36654 - else
36655 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36656 + if (parent)
36657 + parent_start = parent->start;
36658 + else
36659 + parent_start = 0;
36660 + } else
36661 parent_start = 0;
36662
36663 WARN_ON(trans->transid != btrfs_header_generation(parent));
36664 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36665
36666 ret = 0;
36667 if (slot == 0) {
36668 - struct btrfs_disk_key disk_key;
36669 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36670 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36671 }
36672 diff -urNp linux-2.6.32.41/fs/btrfs/disk-io.c linux-2.6.32.41/fs/btrfs/disk-io.c
36673 --- linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36674 +++ linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36675 @@ -39,7 +39,7 @@
36676 #include "tree-log.h"
36677 #include "free-space-cache.h"
36678
36679 -static struct extent_io_ops btree_extent_io_ops;
36680 +static const struct extent_io_ops btree_extent_io_ops;
36681 static void end_workqueue_fn(struct btrfs_work *work);
36682 static void free_fs_root(struct btrfs_root *root);
36683
36684 @@ -2607,7 +2607,7 @@ out:
36685 return 0;
36686 }
36687
36688 -static struct extent_io_ops btree_extent_io_ops = {
36689 +static const struct extent_io_ops btree_extent_io_ops = {
36690 .write_cache_pages_lock_hook = btree_lock_page_hook,
36691 .readpage_end_io_hook = btree_readpage_end_io_hook,
36692 .submit_bio_hook = btree_submit_bio_hook,
36693 diff -urNp linux-2.6.32.41/fs/btrfs/extent_io.h linux-2.6.32.41/fs/btrfs/extent_io.h
36694 --- linux-2.6.32.41/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36695 +++ linux-2.6.32.41/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36696 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36697 struct bio *bio, int mirror_num,
36698 unsigned long bio_flags);
36699 struct extent_io_ops {
36700 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36701 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36702 u64 start, u64 end, int *page_started,
36703 unsigned long *nr_written);
36704 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36705 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36706 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36707 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36708 extent_submit_bio_hook_t *submit_bio_hook;
36709 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
36710 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36711 size_t size, struct bio *bio,
36712 unsigned long bio_flags);
36713 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36714 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36715 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36716 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36717 u64 start, u64 end,
36718 struct extent_state *state);
36719 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36720 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36721 u64 start, u64 end,
36722 struct extent_state *state);
36723 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36724 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36725 struct extent_state *state);
36726 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36727 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36728 struct extent_state *state, int uptodate);
36729 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36730 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36731 unsigned long old, unsigned long bits);
36732 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36733 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36734 unsigned long bits);
36735 - int (*merge_extent_hook)(struct inode *inode,
36736 + int (* const merge_extent_hook)(struct inode *inode,
36737 struct extent_state *new,
36738 struct extent_state *other);
36739 - int (*split_extent_hook)(struct inode *inode,
36740 + int (* const split_extent_hook)(struct inode *inode,
36741 struct extent_state *orig, u64 split);
36742 - int (*write_cache_pages_lock_hook)(struct page *page);
36743 + int (* const write_cache_pages_lock_hook)(struct page *page);
36744 };
36745
36746 struct extent_io_tree {
36747 @@ -88,7 +88,7 @@ struct extent_io_tree {
36748 u64 dirty_bytes;
36749 spinlock_t lock;
36750 spinlock_t buffer_lock;
36751 - struct extent_io_ops *ops;
36752 + const struct extent_io_ops *ops;
36753 };
36754
36755 struct extent_state {
36756 diff -urNp linux-2.6.32.41/fs/btrfs/free-space-cache.c linux-2.6.32.41/fs/btrfs/free-space-cache.c
36757 --- linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36758 +++ linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36759 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36760
36761 while(1) {
36762 if (entry->bytes < bytes || entry->offset < min_start) {
36763 - struct rb_node *node;
36764 -
36765 node = rb_next(&entry->offset_index);
36766 if (!node)
36767 break;
36768 @@ -1226,7 +1224,7 @@ again:
36769 */
36770 while (entry->bitmap || found_bitmap ||
36771 (!entry->bitmap && entry->bytes < min_bytes)) {
36772 - struct rb_node *node = rb_next(&entry->offset_index);
36773 + node = rb_next(&entry->offset_index);
36774
36775 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36776 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36777 diff -urNp linux-2.6.32.41/fs/btrfs/inode.c linux-2.6.32.41/fs/btrfs/inode.c
36778 --- linux-2.6.32.41/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36779 +++ linux-2.6.32.41/fs/btrfs/inode.c 2011-04-17 15:56:46.000000000 -0400
36780 @@ -63,7 +63,7 @@ static const struct inode_operations btr
36781 static const struct address_space_operations btrfs_aops;
36782 static const struct address_space_operations btrfs_symlink_aops;
36783 static const struct file_operations btrfs_dir_file_operations;
36784 -static struct extent_io_ops btrfs_extent_io_ops;
36785 +static const struct extent_io_ops btrfs_extent_io_ops;
36786
36787 static struct kmem_cache *btrfs_inode_cachep;
36788 struct kmem_cache *btrfs_trans_handle_cachep;
36789 @@ -5410,7 +5410,7 @@ fail:
36790 return -ENOMEM;
36791 }
36792
36793 -static int btrfs_getattr(struct vfsmount *mnt,
36794 +int btrfs_getattr(struct vfsmount *mnt,
36795 struct dentry *dentry, struct kstat *stat)
36796 {
36797 struct inode *inode = dentry->d_inode;
36798 @@ -5422,6 +5422,14 @@ static int btrfs_getattr(struct vfsmount
36799 return 0;
36800 }
36801
36802 +EXPORT_SYMBOL(btrfs_getattr);
36803 +
36804 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
36805 +{
36806 + return BTRFS_I(inode)->root->anon_super.s_dev;
36807 +}
36808 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
36809 +
36810 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
36811 struct inode *new_dir, struct dentry *new_dentry)
36812 {
36813 @@ -5972,7 +5980,7 @@ static const struct file_operations btrf
36814 .fsync = btrfs_sync_file,
36815 };
36816
36817 -static struct extent_io_ops btrfs_extent_io_ops = {
36818 +static const struct extent_io_ops btrfs_extent_io_ops = {
36819 .fill_delalloc = run_delalloc_range,
36820 .submit_bio_hook = btrfs_submit_bio_hook,
36821 .merge_bio_hook = btrfs_merge_bio_hook,
36822 diff -urNp linux-2.6.32.41/fs/btrfs/relocation.c linux-2.6.32.41/fs/btrfs/relocation.c
36823 --- linux-2.6.32.41/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
36824 +++ linux-2.6.32.41/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
36825 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
36826 }
36827 spin_unlock(&rc->reloc_root_tree.lock);
36828
36829 - BUG_ON((struct btrfs_root *)node->data != root);
36830 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
36831
36832 if (!del) {
36833 spin_lock(&rc->reloc_root_tree.lock);
36834 diff -urNp linux-2.6.32.41/fs/btrfs/sysfs.c linux-2.6.32.41/fs/btrfs/sysfs.c
36835 --- linux-2.6.32.41/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
36836 +++ linux-2.6.32.41/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
36837 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
36838 complete(&root->kobj_unregister);
36839 }
36840
36841 -static struct sysfs_ops btrfs_super_attr_ops = {
36842 +static const struct sysfs_ops btrfs_super_attr_ops = {
36843 .show = btrfs_super_attr_show,
36844 .store = btrfs_super_attr_store,
36845 };
36846
36847 -static struct sysfs_ops btrfs_root_attr_ops = {
36848 +static const struct sysfs_ops btrfs_root_attr_ops = {
36849 .show = btrfs_root_attr_show,
36850 .store = btrfs_root_attr_store,
36851 };
36852 diff -urNp linux-2.6.32.41/fs/buffer.c linux-2.6.32.41/fs/buffer.c
36853 --- linux-2.6.32.41/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
36854 +++ linux-2.6.32.41/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
36855 @@ -25,6 +25,7 @@
36856 #include <linux/percpu.h>
36857 #include <linux/slab.h>
36858 #include <linux/capability.h>
36859 +#include <linux/security.h>
36860 #include <linux/blkdev.h>
36861 #include <linux/file.h>
36862 #include <linux/quotaops.h>
36863 diff -urNp linux-2.6.32.41/fs/cachefiles/bind.c linux-2.6.32.41/fs/cachefiles/bind.c
36864 --- linux-2.6.32.41/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
36865 +++ linux-2.6.32.41/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
36866 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
36867 args);
36868
36869 /* start by checking things over */
36870 - ASSERT(cache->fstop_percent >= 0 &&
36871 - cache->fstop_percent < cache->fcull_percent &&
36872 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
36873 cache->fcull_percent < cache->frun_percent &&
36874 cache->frun_percent < 100);
36875
36876 - ASSERT(cache->bstop_percent >= 0 &&
36877 - cache->bstop_percent < cache->bcull_percent &&
36878 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
36879 cache->bcull_percent < cache->brun_percent &&
36880 cache->brun_percent < 100);
36881
36882 diff -urNp linux-2.6.32.41/fs/cachefiles/daemon.c linux-2.6.32.41/fs/cachefiles/daemon.c
36883 --- linux-2.6.32.41/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
36884 +++ linux-2.6.32.41/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
36885 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
36886 if (test_bit(CACHEFILES_DEAD, &cache->flags))
36887 return -EIO;
36888
36889 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
36890 + if (datalen > PAGE_SIZE - 1)
36891 return -EOPNOTSUPP;
36892
36893 /* drag the command string into the kernel so we can parse it */
36894 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
36895 if (args[0] != '%' || args[1] != '\0')
36896 return -EINVAL;
36897
36898 - if (fstop < 0 || fstop >= cache->fcull_percent)
36899 + if (fstop >= cache->fcull_percent)
36900 return cachefiles_daemon_range_error(cache, args);
36901
36902 cache->fstop_percent = fstop;
36903 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
36904 if (args[0] != '%' || args[1] != '\0')
36905 return -EINVAL;
36906
36907 - if (bstop < 0 || bstop >= cache->bcull_percent)
36908 + if (bstop >= cache->bcull_percent)
36909 return cachefiles_daemon_range_error(cache, args);
36910
36911 cache->bstop_percent = bstop;
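The bind.c and daemon.c hunks above drop the ">= 0" / "< 0" halves of range checks on quantities with unsigned types (the *_percent fields are declared unsigned in the internal.h hunk that follows, and datalen is a size count), where such comparisons can never fail and compilers flag them as always-true or always-false. A small illustration:

#include <stdio.h>

int main(void)
{
        unsigned fstop_percent = 7;      /* analogous to the unsigned *_percent fields */

        /* Always false for an unsigned value; gcc -Wextra reports
         * "comparison of unsigned expression < 0 is always false". */
        if (fstop_percent < 0)
                printf("never reached\n");

        /* Only the upper bound is a meaningful range check. */
        if (fstop_percent >= 100)
                printf("out of range\n");
        else
                printf("ok: %u%%\n", fstop_percent);
        return 0;
}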
36912 diff -urNp linux-2.6.32.41/fs/cachefiles/internal.h linux-2.6.32.41/fs/cachefiles/internal.h
36913 --- linux-2.6.32.41/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
36914 +++ linux-2.6.32.41/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
36915 @@ -56,7 +56,7 @@ struct cachefiles_cache {
36916 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
36917 struct rb_root active_nodes; /* active nodes (can't be culled) */
36918 rwlock_t active_lock; /* lock for active_nodes */
36919 - atomic_t gravecounter; /* graveyard uniquifier */
36920 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
36921 unsigned frun_percent; /* when to stop culling (% files) */
36922 unsigned fcull_percent; /* when to start culling (% files) */
36923 unsigned fstop_percent; /* when to stop allocating (% files) */
36924 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
36925 * proc.c
36926 */
36927 #ifdef CONFIG_CACHEFILES_HISTOGRAM
36928 -extern atomic_t cachefiles_lookup_histogram[HZ];
36929 -extern atomic_t cachefiles_mkdir_histogram[HZ];
36930 -extern atomic_t cachefiles_create_histogram[HZ];
36931 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
36932 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
36933 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
36934
36935 extern int __init cachefiles_proc_init(void);
36936 extern void cachefiles_proc_cleanup(void);
36937 static inline
36938 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
36939 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
36940 {
36941 unsigned long jif = jiffies - start_jif;
36942 if (jif >= HZ)
36943 jif = HZ - 1;
36944 - atomic_inc(&histogram[jif]);
36945 + atomic_inc_unchecked(&histogram[jif]);
36946 }
36947
36948 #else
36949 diff -urNp linux-2.6.32.41/fs/cachefiles/namei.c linux-2.6.32.41/fs/cachefiles/namei.c
36950 --- linux-2.6.32.41/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
36951 +++ linux-2.6.32.41/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
36952 @@ -250,7 +250,7 @@ try_again:
36953 /* first step is to make up a grave dentry in the graveyard */
36954 sprintf(nbuffer, "%08x%08x",
36955 (uint32_t) get_seconds(),
36956 - (uint32_t) atomic_inc_return(&cache->gravecounter));
36957 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
36958
36959 /* do the multiway lock magic */
36960 trap = lock_rename(cache->graveyard, dir);
36961 diff -urNp linux-2.6.32.41/fs/cachefiles/proc.c linux-2.6.32.41/fs/cachefiles/proc.c
36962 --- linux-2.6.32.41/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
36963 +++ linux-2.6.32.41/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
36964 @@ -14,9 +14,9 @@
36965 #include <linux/seq_file.h>
36966 #include "internal.h"
36967
36968 -atomic_t cachefiles_lookup_histogram[HZ];
36969 -atomic_t cachefiles_mkdir_histogram[HZ];
36970 -atomic_t cachefiles_create_histogram[HZ];
36971 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
36972 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
36973 +atomic_unchecked_t cachefiles_create_histogram[HZ];
36974
36975 /*
36976 * display the latency histogram
36977 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
36978 return 0;
36979 default:
36980 index = (unsigned long) v - 3;
36981 - x = atomic_read(&cachefiles_lookup_histogram[index]);
36982 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
36983 - z = atomic_read(&cachefiles_create_histogram[index]);
36984 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
36985 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
36986 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
36987 if (x == 0 && y == 0 && z == 0)
36988 return 0;
36989
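The internal.h, namei.c and proc.c hunks above convert pure statistics counters (the latency histogram buckets, the graveyard uniquifier) from atomic_t to the PaX atomic_unchecked_t type and use the matching *_unchecked operations. Under CONFIG_PAX_REFCOUNT the ordinary atomic_t operations are overflow-checked; the unchecked variants opt counters out of that check where wraparound is harmless by design. An illustrative userspace sketch of the distinction, using C11 atomics as a stand-in rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Statistics bucket: wraparound is harmless, so it needs no overflow
 * policing -- the role atomic_unchecked_t plays in the hunks above. */
static atomic_uint lookup_bucket;

/* Reference count: an overflow here is a real bug, which is what the
 * checked atomic_t operations are meant to catch under PAX_REFCOUNT. */
static atomic_uint object_refs = 1;

int main(void)
{
        atomic_fetch_add(&lookup_bucket, 1);            /* "inc_unchecked" analogue */

        unsigned refs = atomic_fetch_add(&object_refs, 1) + 1;
        if (refs == 0)                                  /* a checked increment would trap here */
                fprintf(stderr, "refcount overflowed\n");

        printf("bucket=%u refs=%u\n", atomic_load(&lookup_bucket), refs);
        return 0;
}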
36990 diff -urNp linux-2.6.32.41/fs/cachefiles/rdwr.c linux-2.6.32.41/fs/cachefiles/rdwr.c
36991 --- linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
36992 +++ linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
36993 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
36994 old_fs = get_fs();
36995 set_fs(KERNEL_DS);
36996 ret = file->f_op->write(
36997 - file, (const void __user *) data, len, &pos);
36998 + file, (__force const void __user *) data, len, &pos);
36999 set_fs(old_fs);
37000 kunmap(page);
37001 if (ret != len)
37002 diff -urNp linux-2.6.32.41/fs/cifs/cifs_debug.c linux-2.6.32.41/fs/cifs/cifs_debug.c
37003 --- linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37004 +++ linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37005 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37006 tcon = list_entry(tmp3,
37007 struct cifsTconInfo,
37008 tcon_list);
37009 - atomic_set(&tcon->num_smbs_sent, 0);
37010 - atomic_set(&tcon->num_writes, 0);
37011 - atomic_set(&tcon->num_reads, 0);
37012 - atomic_set(&tcon->num_oplock_brks, 0);
37013 - atomic_set(&tcon->num_opens, 0);
37014 - atomic_set(&tcon->num_posixopens, 0);
37015 - atomic_set(&tcon->num_posixmkdirs, 0);
37016 - atomic_set(&tcon->num_closes, 0);
37017 - atomic_set(&tcon->num_deletes, 0);
37018 - atomic_set(&tcon->num_mkdirs, 0);
37019 - atomic_set(&tcon->num_rmdirs, 0);
37020 - atomic_set(&tcon->num_renames, 0);
37021 - atomic_set(&tcon->num_t2renames, 0);
37022 - atomic_set(&tcon->num_ffirst, 0);
37023 - atomic_set(&tcon->num_fnext, 0);
37024 - atomic_set(&tcon->num_fclose, 0);
37025 - atomic_set(&tcon->num_hardlinks, 0);
37026 - atomic_set(&tcon->num_symlinks, 0);
37027 - atomic_set(&tcon->num_locks, 0);
37028 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37029 + atomic_set_unchecked(&tcon->num_writes, 0);
37030 + atomic_set_unchecked(&tcon->num_reads, 0);
37031 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37032 + atomic_set_unchecked(&tcon->num_opens, 0);
37033 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37034 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37035 + atomic_set_unchecked(&tcon->num_closes, 0);
37036 + atomic_set_unchecked(&tcon->num_deletes, 0);
37037 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37038 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37039 + atomic_set_unchecked(&tcon->num_renames, 0);
37040 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37041 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37042 + atomic_set_unchecked(&tcon->num_fnext, 0);
37043 + atomic_set_unchecked(&tcon->num_fclose, 0);
37044 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37045 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37046 + atomic_set_unchecked(&tcon->num_locks, 0);
37047 }
37048 }
37049 }
37050 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37051 if (tcon->need_reconnect)
37052 seq_puts(m, "\tDISCONNECTED ");
37053 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37054 - atomic_read(&tcon->num_smbs_sent),
37055 - atomic_read(&tcon->num_oplock_brks));
37056 + atomic_read_unchecked(&tcon->num_smbs_sent),
37057 + atomic_read_unchecked(&tcon->num_oplock_brks));
37058 seq_printf(m, "\nReads: %d Bytes: %lld",
37059 - atomic_read(&tcon->num_reads),
37060 + atomic_read_unchecked(&tcon->num_reads),
37061 (long long)(tcon->bytes_read));
37062 seq_printf(m, "\nWrites: %d Bytes: %lld",
37063 - atomic_read(&tcon->num_writes),
37064 + atomic_read_unchecked(&tcon->num_writes),
37065 (long long)(tcon->bytes_written));
37066 seq_printf(m, "\nFlushes: %d",
37067 - atomic_read(&tcon->num_flushes));
37068 + atomic_read_unchecked(&tcon->num_flushes));
37069 seq_printf(m, "\nLocks: %d HardLinks: %d "
37070 "Symlinks: %d",
37071 - atomic_read(&tcon->num_locks),
37072 - atomic_read(&tcon->num_hardlinks),
37073 - atomic_read(&tcon->num_symlinks));
37074 + atomic_read_unchecked(&tcon->num_locks),
37075 + atomic_read_unchecked(&tcon->num_hardlinks),
37076 + atomic_read_unchecked(&tcon->num_symlinks));
37077 seq_printf(m, "\nOpens: %d Closes: %d "
37078 "Deletes: %d",
37079 - atomic_read(&tcon->num_opens),
37080 - atomic_read(&tcon->num_closes),
37081 - atomic_read(&tcon->num_deletes));
37082 + atomic_read_unchecked(&tcon->num_opens),
37083 + atomic_read_unchecked(&tcon->num_closes),
37084 + atomic_read_unchecked(&tcon->num_deletes));
37085 seq_printf(m, "\nPosix Opens: %d "
37086 "Posix Mkdirs: %d",
37087 - atomic_read(&tcon->num_posixopens),
37088 - atomic_read(&tcon->num_posixmkdirs));
37089 + atomic_read_unchecked(&tcon->num_posixopens),
37090 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37091 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37092 - atomic_read(&tcon->num_mkdirs),
37093 - atomic_read(&tcon->num_rmdirs));
37094 + atomic_read_unchecked(&tcon->num_mkdirs),
37095 + atomic_read_unchecked(&tcon->num_rmdirs));
37096 seq_printf(m, "\nRenames: %d T2 Renames %d",
37097 - atomic_read(&tcon->num_renames),
37098 - atomic_read(&tcon->num_t2renames));
37099 + atomic_read_unchecked(&tcon->num_renames),
37100 + atomic_read_unchecked(&tcon->num_t2renames));
37101 seq_printf(m, "\nFindFirst: %d FNext %d "
37102 "FClose %d",
37103 - atomic_read(&tcon->num_ffirst),
37104 - atomic_read(&tcon->num_fnext),
37105 - atomic_read(&tcon->num_fclose));
37106 + atomic_read_unchecked(&tcon->num_ffirst),
37107 + atomic_read_unchecked(&tcon->num_fnext),
37108 + atomic_read_unchecked(&tcon->num_fclose));
37109 }
37110 }
37111 }
37112 diff -urNp linux-2.6.32.41/fs/cifs/cifsglob.h linux-2.6.32.41/fs/cifs/cifsglob.h
37113 --- linux-2.6.32.41/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37114 +++ linux-2.6.32.41/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37115 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37116 __u16 Flags; /* optional support bits */
37117 enum statusEnum tidStatus;
37118 #ifdef CONFIG_CIFS_STATS
37119 - atomic_t num_smbs_sent;
37120 - atomic_t num_writes;
37121 - atomic_t num_reads;
37122 - atomic_t num_flushes;
37123 - atomic_t num_oplock_brks;
37124 - atomic_t num_opens;
37125 - atomic_t num_closes;
37126 - atomic_t num_deletes;
37127 - atomic_t num_mkdirs;
37128 - atomic_t num_posixopens;
37129 - atomic_t num_posixmkdirs;
37130 - atomic_t num_rmdirs;
37131 - atomic_t num_renames;
37132 - atomic_t num_t2renames;
37133 - atomic_t num_ffirst;
37134 - atomic_t num_fnext;
37135 - atomic_t num_fclose;
37136 - atomic_t num_hardlinks;
37137 - atomic_t num_symlinks;
37138 - atomic_t num_locks;
37139 - atomic_t num_acl_get;
37140 - atomic_t num_acl_set;
37141 + atomic_unchecked_t num_smbs_sent;
37142 + atomic_unchecked_t num_writes;
37143 + atomic_unchecked_t num_reads;
37144 + atomic_unchecked_t num_flushes;
37145 + atomic_unchecked_t num_oplock_brks;
37146 + atomic_unchecked_t num_opens;
37147 + atomic_unchecked_t num_closes;
37148 + atomic_unchecked_t num_deletes;
37149 + atomic_unchecked_t num_mkdirs;
37150 + atomic_unchecked_t num_posixopens;
37151 + atomic_unchecked_t num_posixmkdirs;
37152 + atomic_unchecked_t num_rmdirs;
37153 + atomic_unchecked_t num_renames;
37154 + atomic_unchecked_t num_t2renames;
37155 + atomic_unchecked_t num_ffirst;
37156 + atomic_unchecked_t num_fnext;
37157 + atomic_unchecked_t num_fclose;
37158 + atomic_unchecked_t num_hardlinks;
37159 + atomic_unchecked_t num_symlinks;
37160 + atomic_unchecked_t num_locks;
37161 + atomic_unchecked_t num_acl_get;
37162 + atomic_unchecked_t num_acl_set;
37163 #ifdef CONFIG_CIFS_STATS2
37164 unsigned long long time_writes;
37165 unsigned long long time_reads;
37166 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37167 }
37168
37169 #ifdef CONFIG_CIFS_STATS
37170 -#define cifs_stats_inc atomic_inc
37171 +#define cifs_stats_inc atomic_inc_unchecked
37172
37173 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37174 unsigned int bytes)
37175 diff -urNp linux-2.6.32.41/fs/cifs/link.c linux-2.6.32.41/fs/cifs/link.c
37176 --- linux-2.6.32.41/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37177 +++ linux-2.6.32.41/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37178 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37179
37180 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37181 {
37182 - char *p = nd_get_link(nd);
37183 + const char *p = nd_get_link(nd);
37184 if (!IS_ERR(p))
37185 kfree(p);
37186 }
37187 diff -urNp linux-2.6.32.41/fs/coda/cache.c linux-2.6.32.41/fs/coda/cache.c
37188 --- linux-2.6.32.41/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37189 +++ linux-2.6.32.41/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37190 @@ -24,14 +24,14 @@
37191 #include <linux/coda_fs_i.h>
37192 #include <linux/coda_cache.h>
37193
37194 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37195 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37196
37197 /* replace or extend an acl cache hit */
37198 void coda_cache_enter(struct inode *inode, int mask)
37199 {
37200 struct coda_inode_info *cii = ITOC(inode);
37201
37202 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37203 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37204 if (cii->c_uid != current_fsuid()) {
37205 cii->c_uid = current_fsuid();
37206 cii->c_cached_perm = mask;
37207 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37208 void coda_cache_clear_inode(struct inode *inode)
37209 {
37210 struct coda_inode_info *cii = ITOC(inode);
37211 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37212 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37213 }
37214
37215 /* remove all acl caches */
37216 void coda_cache_clear_all(struct super_block *sb)
37217 {
37218 - atomic_inc(&permission_epoch);
37219 + atomic_inc_unchecked(&permission_epoch);
37220 }
37221
37222
37223 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37224
37225 hit = (mask & cii->c_cached_perm) == mask &&
37226 cii->c_uid == current_fsuid() &&
37227 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37228 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37229
37230 return hit;
37231 }
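Besides switching the counter to atomic_unchecked_t, the coda hunks show the epoch-based invalidation scheme the cache relies on: each cached permission remembers the epoch it was filled at, and bumping the single global epoch invalidates every cached entry at once without walking them. A condensed, single-threaded sketch of the idea (hypothetical names, no locking):

#include <stdio.h>

static unsigned permission_epoch;               /* global generation counter */

struct cached_perm {
        unsigned cached_epoch;                  /* epoch the entry was filled at */
        int      mask;
};

static void cache_enter(struct cached_perm *c, int mask)
{
        c->cached_epoch = permission_epoch;
        c->mask = mask;
}

static int cache_check(const struct cached_perm *c, int mask)
{
        /* A hit needs both the bits and a still-current epoch. */
        return (c->mask & mask) == mask && c->cached_epoch == permission_epoch;
}

static void cache_clear_all(void)
{
        permission_epoch++;                     /* one increment invalidates everything */
}

int main(void)
{
        struct cached_perm c = { 0 };

        cache_enter(&c, 4);
        printf("hit before clear: %d\n", cache_check(&c, 4));   /* 1 */
        cache_clear_all();
        printf("hit after clear:  %d\n", cache_check(&c, 4));   /* 0 */
        return 0;
}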
37232 diff -urNp linux-2.6.32.41/fs/compat_binfmt_elf.c linux-2.6.32.41/fs/compat_binfmt_elf.c
37233 --- linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37234 +++ linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37235 @@ -29,10 +29,12 @@
37236 #undef elfhdr
37237 #undef elf_phdr
37238 #undef elf_note
37239 +#undef elf_dyn
37240 #undef elf_addr_t
37241 #define elfhdr elf32_hdr
37242 #define elf_phdr elf32_phdr
37243 #define elf_note elf32_note
37244 +#define elf_dyn Elf32_Dyn
37245 #define elf_addr_t Elf32_Addr
37246
37247 /*
37248 diff -urNp linux-2.6.32.41/fs/compat.c linux-2.6.32.41/fs/compat.c
37249 --- linux-2.6.32.41/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37250 +++ linux-2.6.32.41/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37251 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37252
37253 struct compat_readdir_callback {
37254 struct compat_old_linux_dirent __user *dirent;
37255 + struct file * file;
37256 int result;
37257 };
37258
37259 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37260 buf->result = -EOVERFLOW;
37261 return -EOVERFLOW;
37262 }
37263 +
37264 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37265 + return 0;
37266 +
37267 buf->result++;
37268 dirent = buf->dirent;
37269 if (!access_ok(VERIFY_WRITE, dirent,
37270 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37271
37272 buf.result = 0;
37273 buf.dirent = dirent;
37274 + buf.file = file;
37275
37276 error = vfs_readdir(file, compat_fillonedir, &buf);
37277 if (buf.result)
37278 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37279 struct compat_getdents_callback {
37280 struct compat_linux_dirent __user *current_dir;
37281 struct compat_linux_dirent __user *previous;
37282 + struct file * file;
37283 int count;
37284 int error;
37285 };
37286 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37287 buf->error = -EOVERFLOW;
37288 return -EOVERFLOW;
37289 }
37290 +
37291 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37292 + return 0;
37293 +
37294 dirent = buf->previous;
37295 if (dirent) {
37296 if (__put_user(offset, &dirent->d_off))
37297 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37298 buf.previous = NULL;
37299 buf.count = count;
37300 buf.error = 0;
37301 + buf.file = file;
37302
37303 error = vfs_readdir(file, compat_filldir, &buf);
37304 if (error >= 0)
37305 @@ -987,6 +999,7 @@ out:
37306 struct compat_getdents_callback64 {
37307 struct linux_dirent64 __user *current_dir;
37308 struct linux_dirent64 __user *previous;
37309 + struct file * file;
37310 int count;
37311 int error;
37312 };
37313 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37314 buf->error = -EINVAL; /* only used if we fail.. */
37315 if (reclen > buf->count)
37316 return -EINVAL;
37317 +
37318 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37319 + return 0;
37320 +
37321 dirent = buf->previous;
37322
37323 if (dirent) {
37324 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37325 buf.previous = NULL;
37326 buf.count = count;
37327 buf.error = 0;
37328 + buf.file = file;
37329
37330 error = vfs_readdir(file, compat_filldir64, &buf);
37331 if (error >= 0)
37332 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37333 * verify all the pointers
37334 */
37335 ret = -EINVAL;
37336 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37337 + if (nr_segs > UIO_MAXIOV)
37338 goto out;
37339 if (!file->f_op)
37340 goto out;
37341 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37342 compat_uptr_t __user *envp,
37343 struct pt_regs * regs)
37344 {
37345 +#ifdef CONFIG_GRKERNSEC
37346 + struct file *old_exec_file;
37347 + struct acl_subject_label *old_acl;
37348 + struct rlimit old_rlim[RLIM_NLIMITS];
37349 +#endif
37350 struct linux_binprm *bprm;
37351 struct file *file;
37352 struct files_struct *displaced;
37353 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37354 bprm->filename = filename;
37355 bprm->interp = filename;
37356
37357 + if (gr_process_user_ban()) {
37358 + retval = -EPERM;
37359 + goto out_file;
37360 + }
37361 +
37362 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37363 + retval = -EAGAIN;
37364 + if (gr_handle_nproc())
37365 + goto out_file;
37366 + retval = -EACCES;
37367 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37368 + goto out_file;
37369 +
37370 retval = bprm_mm_init(bprm);
37371 if (retval)
37372 goto out_file;
37373 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37374 if (retval < 0)
37375 goto out;
37376
37377 + if (!gr_tpe_allow(file)) {
37378 + retval = -EACCES;
37379 + goto out;
37380 + }
37381 +
37382 + if (gr_check_crash_exec(file)) {
37383 + retval = -EACCES;
37384 + goto out;
37385 + }
37386 +
37387 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37388 +
37389 + gr_handle_exec_args_compat(bprm, argv);
37390 +
37391 +#ifdef CONFIG_GRKERNSEC
37392 + old_acl = current->acl;
37393 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37394 + old_exec_file = current->exec_file;
37395 + get_file(file);
37396 + current->exec_file = file;
37397 +#endif
37398 +
37399 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37400 + bprm->unsafe & LSM_UNSAFE_SHARE);
37401 + if (retval < 0)
37402 + goto out_fail;
37403 +
37404 retval = search_binary_handler(bprm, regs);
37405 if (retval < 0)
37406 - goto out;
37407 + goto out_fail;
37408 +#ifdef CONFIG_GRKERNSEC
37409 + if (old_exec_file)
37410 + fput(old_exec_file);
37411 +#endif
37412
37413 /* execve succeeded */
37414 current->fs->in_exec = 0;
37415 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37416 put_files_struct(displaced);
37417 return retval;
37418
37419 +out_fail:
37420 +#ifdef CONFIG_GRKERNSEC
37421 + current->acl = old_acl;
37422 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37423 + fput(current->exec_file);
37424 + current->exec_file = old_exec_file;
37425 +#endif
37426 +
37427 out:
37428 if (bprm->mm) {
37429 acct_arg_size(bprm, 0);
37430 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37431 struct fdtable *fdt;
37432 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37433
37434 + pax_track_stack();
37435 +
37436 if (n < 0)
37437 goto out_nofds;
37438
37439 diff -urNp linux-2.6.32.41/fs/compat_ioctl.c linux-2.6.32.41/fs/compat_ioctl.c
37440 --- linux-2.6.32.41/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37441 +++ linux-2.6.32.41/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37442 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37443 up = (struct compat_video_spu_palette __user *) arg;
37444 err = get_user(palp, &up->palette);
37445 err |= get_user(length, &up->length);
37446 + if (err)
37447 + return -EFAULT;
37448
37449 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37450 err = put_user(compat_ptr(palp), &up_native->palette);
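The compat_ioctl.c hunk returns -EFAULT as soon as either get_user() fails, instead of going on to use palp and length when they were never filled in. The check-before-use shape of that fix, in a self-contained sketch with a stand-in fetch helper (not the kernel get_user):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for get_user(): copies one int from "user" memory, may fault. */
static int fetch_int(int *dst, const int *src)
{
        if (!src)
                return -EFAULT;
        memcpy(dst, src, sizeof(*dst));
        return 0;
}

static int set_palette(const int *user_palp, const int *user_length)
{
        int palp, length, err;

        err  = fetch_int(&palp, user_palp);
        err |= fetch_int(&length, user_length);
        if (err)
                return -EFAULT;   /* the hunk's fix: never use palp/length after a fault */

        printf("palette=%d length=%d\n", palp, length);
        return 0;
}

int main(void)
{
        int p = 3, l = 16;
        printf("ok path: %d\n", set_palette(&p, &l));
        printf("fault path: %d\n", set_palette(NULL, &l));
        return 0;
}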
37451 diff -urNp linux-2.6.32.41/fs/configfs/dir.c linux-2.6.32.41/fs/configfs/dir.c
37452 --- linux-2.6.32.41/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37453 +++ linux-2.6.32.41/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37454 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37455 }
37456 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37457 struct configfs_dirent *next;
37458 - const char * name;
37459 + const unsigned char * name;
37460 + char d_name[sizeof(next->s_dentry->d_iname)];
37461 int len;
37462
37463 next = list_entry(p, struct configfs_dirent,
37464 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37465 continue;
37466
37467 name = configfs_get_name(next);
37468 - len = strlen(name);
37469 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37470 + len = next->s_dentry->d_name.len;
37471 + memcpy(d_name, name, len);
37472 + name = d_name;
37473 + } else
37474 + len = strlen(name);
37475 if (next->s_dentry)
37476 ino = next->s_dentry->d_inode->i_ino;
37477 else
37478 diff -urNp linux-2.6.32.41/fs/dcache.c linux-2.6.32.41/fs/dcache.c
37479 --- linux-2.6.32.41/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37480 +++ linux-2.6.32.41/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37481 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37482
37483 static struct kmem_cache *dentry_cache __read_mostly;
37484
37485 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37486 -
37487 /*
37488 * This is the single most critical data structure when it comes
37489 * to the dcache: the hashtable for lookups. Somebody should try
37490 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37491 mempages -= reserve;
37492
37493 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37494 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37495 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37496
37497 dcache_init();
37498 inode_init();
37499 diff -urNp linux-2.6.32.41/fs/dlm/lockspace.c linux-2.6.32.41/fs/dlm/lockspace.c
37500 --- linux-2.6.32.41/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37501 +++ linux-2.6.32.41/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37502 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37503 kfree(ls);
37504 }
37505
37506 -static struct sysfs_ops dlm_attr_ops = {
37507 +static const struct sysfs_ops dlm_attr_ops = {
37508 .show = dlm_attr_show,
37509 .store = dlm_attr_store,
37510 };
37511 diff -urNp linux-2.6.32.41/fs/ecryptfs/inode.c linux-2.6.32.41/fs/ecryptfs/inode.c
37512 --- linux-2.6.32.41/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37513 +++ linux-2.6.32.41/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37514 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37515 old_fs = get_fs();
37516 set_fs(get_ds());
37517 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37518 - (char __user *)lower_buf,
37519 + (__force char __user *)lower_buf,
37520 lower_bufsiz);
37521 set_fs(old_fs);
37522 if (rc < 0)
37523 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37524 }
37525 old_fs = get_fs();
37526 set_fs(get_ds());
37527 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37528 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37529 set_fs(old_fs);
37530 if (rc < 0)
37531 goto out_free;
37532 diff -urNp linux-2.6.32.41/fs/exec.c linux-2.6.32.41/fs/exec.c
37533 --- linux-2.6.32.41/fs/exec.c 2011-04-17 17:00:52.000000000 -0400
37534 +++ linux-2.6.32.41/fs/exec.c 2011-06-04 20:41:36.000000000 -0400
37535 @@ -56,12 +56,24 @@
37536 #include <linux/fsnotify.h>
37537 #include <linux/fs_struct.h>
37538 #include <linux/pipe_fs_i.h>
37539 +#include <linux/random.h>
37540 +#include <linux/seq_file.h>
37541 +
37542 +#ifdef CONFIG_PAX_REFCOUNT
37543 +#include <linux/kallsyms.h>
37544 +#include <linux/kdebug.h>
37545 +#endif
37546
37547 #include <asm/uaccess.h>
37548 #include <asm/mmu_context.h>
37549 #include <asm/tlb.h>
37550 #include "internal.h"
37551
37552 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37553 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37554 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37555 +#endif
37556 +
37557 int core_uses_pid;
37558 char core_pattern[CORENAME_MAX_SIZE] = "core";
37559 unsigned int core_pipe_limit;
37560 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37561 goto out;
37562
37563 file = do_filp_open(AT_FDCWD, tmp,
37564 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37565 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37566 MAY_READ | MAY_EXEC | MAY_OPEN);
37567 putname(tmp);
37568 error = PTR_ERR(file);
37569 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37570 int write)
37571 {
37572 struct page *page;
37573 - int ret;
37574
37575 -#ifdef CONFIG_STACK_GROWSUP
37576 - if (write) {
37577 - ret = expand_stack_downwards(bprm->vma, pos);
37578 - if (ret < 0)
37579 - return NULL;
37580 - }
37581 -#endif
37582 - ret = get_user_pages(current, bprm->mm, pos,
37583 - 1, write, 1, &page, NULL);
37584 - if (ret <= 0)
37585 + if (0 > expand_stack_downwards(bprm->vma, pos))
37586 + return NULL;
37587 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37588 return NULL;
37589
37590 if (write) {
37591 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37592 vma->vm_end = STACK_TOP_MAX;
37593 vma->vm_start = vma->vm_end - PAGE_SIZE;
37594 vma->vm_flags = VM_STACK_FLAGS;
37595 +
37596 +#ifdef CONFIG_PAX_SEGMEXEC
37597 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37598 +#endif
37599 +
37600 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37601
37602 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37603 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37604 mm->stack_vm = mm->total_vm = 1;
37605 up_write(&mm->mmap_sem);
37606 bprm->p = vma->vm_end - sizeof(void *);
37607 +
37608 +#ifdef CONFIG_PAX_RANDUSTACK
37609 + if (randomize_va_space)
37610 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37611 +#endif
37612 +
37613 return 0;
37614 err:
37615 up_write(&mm->mmap_sem);
37616 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37617 int r;
37618 mm_segment_t oldfs = get_fs();
37619 set_fs(KERNEL_DS);
37620 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37621 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37622 set_fs(oldfs);
37623 return r;
37624 }
37625 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37626 unsigned long new_end = old_end - shift;
37627 struct mmu_gather *tlb;
37628
37629 - BUG_ON(new_start > new_end);
37630 + if (new_start >= new_end || new_start < mmap_min_addr)
37631 + return -ENOMEM;
37632
37633 /*
37634 * ensure there are no vmas between where we want to go
37635 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37636 if (vma != find_vma(mm, new_start))
37637 return -EFAULT;
37638
37639 +#ifdef CONFIG_PAX_SEGMEXEC
37640 + BUG_ON(pax_find_mirror_vma(vma));
37641 +#endif
37642 +
37643 /*
37644 * cover the whole range: [new_start, old_end)
37645 */
37646 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37647 stack_top = arch_align_stack(stack_top);
37648 stack_top = PAGE_ALIGN(stack_top);
37649
37650 - if (unlikely(stack_top < mmap_min_addr) ||
37651 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37652 - return -ENOMEM;
37653 -
37654 stack_shift = vma->vm_end - stack_top;
37655
37656 bprm->p -= stack_shift;
37657 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37658 bprm->exec -= stack_shift;
37659
37660 down_write(&mm->mmap_sem);
37661 +
37662 + /* Move stack pages down in memory. */
37663 + if (stack_shift) {
37664 + ret = shift_arg_pages(vma, stack_shift);
37665 + if (ret)
37666 + goto out_unlock;
37667 + }
37668 +
37669 vm_flags = VM_STACK_FLAGS;
37670
37671 /*
37672 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37673 vm_flags &= ~VM_EXEC;
37674 vm_flags |= mm->def_flags;
37675
37676 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37677 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37678 + vm_flags &= ~VM_EXEC;
37679 +
37680 +#ifdef CONFIG_PAX_MPROTECT
37681 + if (mm->pax_flags & MF_PAX_MPROTECT)
37682 + vm_flags &= ~VM_MAYEXEC;
37683 +#endif
37684 +
37685 + }
37686 +#endif
37687 +
37688 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37689 vm_flags);
37690 if (ret)
37691 goto out_unlock;
37692 BUG_ON(prev != vma);
37693
37694 - /* Move stack pages down in memory. */
37695 - if (stack_shift) {
37696 - ret = shift_arg_pages(vma, stack_shift);
37697 - if (ret)
37698 - goto out_unlock;
37699 - }
37700 -
37701 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37702 stack_size = vma->vm_end - vma->vm_start;
37703 /*
37704 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37705 int err;
37706
37707 file = do_filp_open(AT_FDCWD, name,
37708 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37709 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37710 MAY_EXEC | MAY_OPEN);
37711 if (IS_ERR(file))
37712 goto out;
37713 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37714 old_fs = get_fs();
37715 set_fs(get_ds());
37716 /* The cast to a user pointer is valid due to the set_fs() */
37717 - result = vfs_read(file, (void __user *)addr, count, &pos);
37718 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
37719 set_fs(old_fs);
37720 return result;
37721 }
37722 @@ -1151,7 +1180,7 @@ int check_unsafe_exec(struct linux_binpr
37723 }
37724 rcu_read_unlock();
37725
37726 - if (p->fs->users > n_fs) {
37727 + if (atomic_read(&p->fs->users) > n_fs) {
37728 bprm->unsafe |= LSM_UNSAFE_SHARE;
37729 } else {
37730 res = -EAGAIN;
37731 @@ -1350,6 +1379,11 @@ int do_execve(char * filename,
37732 char __user *__user *envp,
37733 struct pt_regs * regs)
37734 {
37735 +#ifdef CONFIG_GRKERNSEC
37736 + struct file *old_exec_file;
37737 + struct acl_subject_label *old_acl;
37738 + struct rlimit old_rlim[RLIM_NLIMITS];
37739 +#endif
37740 struct linux_binprm *bprm;
37741 struct file *file;
37742 struct files_struct *displaced;
37743 @@ -1386,6 +1420,23 @@ int do_execve(char * filename,
37744 bprm->filename = filename;
37745 bprm->interp = filename;
37746
37747 + if (gr_process_user_ban()) {
37748 + retval = -EPERM;
37749 + goto out_file;
37750 + }
37751 +
37752 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37753 +
37754 + if (gr_handle_nproc()) {
37755 + retval = -EAGAIN;
37756 + goto out_file;
37757 + }
37758 +
37759 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37760 + retval = -EACCES;
37761 + goto out_file;
37762 + }
37763 +
37764 retval = bprm_mm_init(bprm);
37765 if (retval)
37766 goto out_file;
37767 @@ -1415,10 +1466,41 @@ int do_execve(char * filename,
37768 if (retval < 0)
37769 goto out;
37770
37771 + if (!gr_tpe_allow(file)) {
37772 + retval = -EACCES;
37773 + goto out;
37774 + }
37775 +
37776 + if (gr_check_crash_exec(file)) {
37777 + retval = -EACCES;
37778 + goto out;
37779 + }
37780 +
37781 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37782 +
37783 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37784 +
37785 +#ifdef CONFIG_GRKERNSEC
37786 + old_acl = current->acl;
37787 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37788 + old_exec_file = current->exec_file;
37789 + get_file(file);
37790 + current->exec_file = file;
37791 +#endif
37792 +
37793 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37794 + bprm->unsafe & LSM_UNSAFE_SHARE);
37795 + if (retval < 0)
37796 + goto out_fail;
37797 +
37798 current->flags &= ~PF_KTHREAD;
37799 retval = search_binary_handler(bprm,regs);
37800 if (retval < 0)
37801 - goto out;
37802 + goto out_fail;
37803 +#ifdef CONFIG_GRKERNSEC
37804 + if (old_exec_file)
37805 + fput(old_exec_file);
37806 +#endif
37807
37808 /* execve succeeded */
37809 current->fs->in_exec = 0;
37810 @@ -1429,6 +1511,14 @@ int do_execve(char * filename,
37811 put_files_struct(displaced);
37812 return retval;
37813
37814 +out_fail:
37815 +#ifdef CONFIG_GRKERNSEC
37816 + current->acl = old_acl;
37817 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37818 + fput(current->exec_file);
37819 + current->exec_file = old_exec_file;
37820 +#endif
37821 +
37822 out:
37823 if (bprm->mm) {
37824 acct_arg_size(bprm, 0);
37825 @@ -1594,6 +1684,220 @@ out:
37826 return ispipe;
37827 }
37828
37829 +int pax_check_flags(unsigned long *flags)
37830 +{
37831 + int retval = 0;
37832 +
37833 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
37834 + if (*flags & MF_PAX_SEGMEXEC)
37835 + {
37836 + *flags &= ~MF_PAX_SEGMEXEC;
37837 + retval = -EINVAL;
37838 + }
37839 +#endif
37840 +
37841 + if ((*flags & MF_PAX_PAGEEXEC)
37842 +
37843 +#ifdef CONFIG_PAX_PAGEEXEC
37844 + && (*flags & MF_PAX_SEGMEXEC)
37845 +#endif
37846 +
37847 + )
37848 + {
37849 + *flags &= ~MF_PAX_PAGEEXEC;
37850 + retval = -EINVAL;
37851 + }
37852 +
37853 + if ((*flags & MF_PAX_MPROTECT)
37854 +
37855 +#ifdef CONFIG_PAX_MPROTECT
37856 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37857 +#endif
37858 +
37859 + )
37860 + {
37861 + *flags &= ~MF_PAX_MPROTECT;
37862 + retval = -EINVAL;
37863 + }
37864 +
37865 + if ((*flags & MF_PAX_EMUTRAMP)
37866 +
37867 +#ifdef CONFIG_PAX_EMUTRAMP
37868 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37869 +#endif
37870 +
37871 + )
37872 + {
37873 + *flags &= ~MF_PAX_EMUTRAMP;
37874 + retval = -EINVAL;
37875 + }
37876 +
37877 + return retval;
37878 +}
37879 +
37880 +EXPORT_SYMBOL(pax_check_flags);
37881 +
37882 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37883 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
37884 +{
37885 + struct task_struct *tsk = current;
37886 + struct mm_struct *mm = current->mm;
37887 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
37888 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
37889 + char *path_exec = NULL;
37890 + char *path_fault = NULL;
37891 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
37892 +
37893 + if (buffer_exec && buffer_fault) {
37894 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
37895 +
37896 + down_read(&mm->mmap_sem);
37897 + vma = mm->mmap;
37898 + while (vma && (!vma_exec || !vma_fault)) {
37899 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
37900 + vma_exec = vma;
37901 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
37902 + vma_fault = vma;
37903 + vma = vma->vm_next;
37904 + }
37905 + if (vma_exec) {
37906 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
37907 + if (IS_ERR(path_exec))
37908 + path_exec = "<path too long>";
37909 + else {
37910 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
37911 + if (path_exec) {
37912 + *path_exec = 0;
37913 + path_exec = buffer_exec;
37914 + } else
37915 + path_exec = "<path too long>";
37916 + }
37917 + }
37918 + if (vma_fault) {
37919 + start = vma_fault->vm_start;
37920 + end = vma_fault->vm_end;
37921 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
37922 + if (vma_fault->vm_file) {
37923 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
37924 + if (IS_ERR(path_fault))
37925 + path_fault = "<path too long>";
37926 + else {
37927 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
37928 + if (path_fault) {
37929 + *path_fault = 0;
37930 + path_fault = buffer_fault;
37931 + } else
37932 + path_fault = "<path too long>";
37933 + }
37934 + } else
37935 + path_fault = "<anonymous mapping>";
37936 + }
37937 + up_read(&mm->mmap_sem);
37938 + }
37939 + if (tsk->signal->curr_ip)
37940 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
37941 + else
37942 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
37943 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
37944 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
37945 + task_uid(tsk), task_euid(tsk), pc, sp);
37946 + free_page((unsigned long)buffer_exec);
37947 + free_page((unsigned long)buffer_fault);
37948 + pax_report_insns(pc, sp);
37949 + do_coredump(SIGKILL, SIGKILL, regs);
37950 +}
37951 +#endif
37952 +
37953 +#ifdef CONFIG_PAX_REFCOUNT
37954 +void pax_report_refcount_overflow(struct pt_regs *regs)
37955 +{
37956 + if (current->signal->curr_ip)
37957 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37958 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
37959 + else
37960 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37961 + current->comm, task_pid_nr(current), current_uid(), current_euid());
37962 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
37963 + show_regs(regs);
37964 + force_sig_specific(SIGKILL, current);
37965 +}
37966 +#endif
37967 +
37968 +#ifdef CONFIG_PAX_USERCOPY
37969 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
37970 +int object_is_on_stack(const void *obj, unsigned long len)
37971 +{
37972 + const void * const stack = task_stack_page(current);
37973 + const void * const stackend = stack + THREAD_SIZE;
37974 +
37975 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
37976 + const void *frame = NULL;
37977 + const void *oldframe;
37978 +#endif
37979 +
37980 + if (obj + len < obj)
37981 + return -1;
37982 +
37983 + if (obj + len <= stack || stackend <= obj)
37984 + return 0;
37985 +
37986 + if (obj < stack || stackend < obj + len)
37987 + return -1;
37988 +
37989 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
37990 + oldframe = __builtin_frame_address(1);
37991 + if (oldframe)
37992 + frame = __builtin_frame_address(2);
37993 + /*
37994 + low ----------------------------------------------> high
37995 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
37996 + ^----------------^
37997 + allow copies only within here
37998 + */
37999 + while (stack <= frame && frame < stackend) {
38000 + /* if obj + len extends past the last frame, this
38001 + check won't pass and the next frame will be 0,
38002 + causing us to bail out and correctly report
38003 + the copy as invalid
38004 + */
38005 + if (obj + len <= frame)
38006 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38007 + oldframe = frame;
38008 + frame = *(const void * const *)frame;
38009 + }
38010 + return -1;
38011 +#else
38012 + return 1;
38013 +#endif
38014 +}
38015 +
38016 +
38017 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38018 +{
38019 + if (current->signal->curr_ip)
38020 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38021 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38022 + else
38023 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38024 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38025 +
38026 + dump_stack();
38027 + gr_handle_kernel_exploit();
38028 + do_group_exit(SIGKILL);
38029 +}
38030 +#endif
38031 +
38032 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38033 +void pax_track_stack(void)
38034 +{
38035 + unsigned long sp = (unsigned long)&sp;
38036 + if (sp < current_thread_info()->lowest_stack &&
38037 + sp > (unsigned long)task_stack_page(current))
38038 + current_thread_info()->lowest_stack = sp;
38039 +}
38040 +EXPORT_SYMBOL(pax_track_stack);
38041 +#endif
38042 +
38043 static int zap_process(struct task_struct *start)
38044 {
38045 struct task_struct *t;
38046 @@ -1796,17 +2100,17 @@ static void wait_for_dump_helpers(struct
38047 pipe = file->f_path.dentry->d_inode->i_pipe;
38048
38049 pipe_lock(pipe);
38050 - pipe->readers++;
38051 - pipe->writers--;
38052 + atomic_inc(&pipe->readers);
38053 + atomic_dec(&pipe->writers);
38054
38055 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38056 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38057 wake_up_interruptible_sync(&pipe->wait);
38058 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38059 pipe_wait(pipe);
38060 }
38061
38062 - pipe->readers--;
38063 - pipe->writers++;
38064 + atomic_dec(&pipe->readers);
38065 + atomic_inc(&pipe->writers);
38066 pipe_unlock(pipe);
38067
38068 }
38069 @@ -1829,10 +2133,13 @@ void do_coredump(long signr, int exit_co
38070 char **helper_argv = NULL;
38071 int helper_argc = 0;
38072 int dump_count = 0;
38073 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38074 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38075
38076 audit_core_dumps(signr);
38077
38078 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38079 + gr_handle_brute_attach(current, mm->flags);
38080 +
38081 binfmt = mm->binfmt;
38082 if (!binfmt || !binfmt->core_dump)
38083 goto fail;
38084 @@ -1877,6 +2184,8 @@ void do_coredump(long signr, int exit_co
38085 */
38086 clear_thread_flag(TIF_SIGPENDING);
38087
38088 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38089 +
38090 /*
38091 * lock_kernel() because format_corename() is controlled by sysctl, which
38092 * uses lock_kernel()
38093 @@ -1911,7 +2220,7 @@ void do_coredump(long signr, int exit_co
38094 goto fail_unlock;
38095 }
38096
38097 - dump_count = atomic_inc_return(&core_dump_count);
38098 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38099 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38100 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38101 task_tgid_vnr(current), current->comm);
38102 @@ -1975,7 +2284,7 @@ close_fail:
38103 filp_close(file, NULL);
38104 fail_dropcount:
38105 if (dump_count)
38106 - atomic_dec(&core_dump_count);
38107 + atomic_dec_unchecked(&core_dump_count);
38108 fail_unlock:
38109 if (helper_argv)
38110 argv_free(helper_argv);
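Among the fs/exec.c additions above, object_is_on_stack() classifies a candidate usercopy buffer against the current thread's stack: 0 when the range is entirely off the stack, 1 when it is fully inside, -1 when it wraps or straddles a boundary (plus an optional frame-pointer walk on x86 that further confines copies to the live frames). The boundary arithmetic in isolation, as a userspace sketch operating on plain addresses:

#include <stdio.h>

/* Classify [obj, obj+len) against a [stack, stackend) region the way the
 * object_is_on_stack() helper does: 0 = not on the stack at all,
 * 1 = fully inside, -1 = wrap or partial overlap (an error for a copy). */
static int classify_range(unsigned long obj, unsigned long len,
                          unsigned long stack, unsigned long stackend)
{
        if (obj + len < obj)
                return -1;                      /* length wraps the address space */
        if (obj + len <= stack || stackend <= obj)
                return 0;                       /* disjoint from the stack */
        if (obj < stack || stackend < obj + len)
                return -1;                      /* straddles a boundary */
        return 1;                               /* fully contained */
}

int main(void)
{
        unsigned long stack = 0x1000, stackend = 0x2000;

        printf("%d\n", classify_range(0x1100, 0x80,  stack, stackend)); /* 1: inside */
        printf("%d\n", classify_range(0x0800, 0x80,  stack, stackend)); /* 0: outside */
        printf("%d\n", classify_range(0x1f80, 0x100, stack, stackend)); /* -1: straddles */
        return 0;
}

The -1 cases are the ones the PAX_USERCOPY checks treat as invalid copies.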
38111 diff -urNp linux-2.6.32.41/fs/ext2/balloc.c linux-2.6.32.41/fs/ext2/balloc.c
38112 --- linux-2.6.32.41/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38113 +++ linux-2.6.32.41/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38114 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38115
38116 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38117 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38118 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38119 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38120 sbi->s_resuid != current_fsuid() &&
38121 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38122 return 0;
38123 diff -urNp linux-2.6.32.41/fs/ext3/balloc.c linux-2.6.32.41/fs/ext3/balloc.c
38124 --- linux-2.6.32.41/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38125 +++ linux-2.6.32.41/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38126 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38127
38128 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38129 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38130 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38131 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38132 sbi->s_resuid != current_fsuid() &&
38133 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38134 return 0;
38135 diff -urNp linux-2.6.32.41/fs/ext4/balloc.c linux-2.6.32.41/fs/ext4/balloc.c
38136 --- linux-2.6.32.41/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38137 +++ linux-2.6.32.41/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38138 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38139 /* Hm, nope. Are (enough) root reserved blocks available? */
38140 if (sbi->s_resuid == current_fsuid() ||
38141 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38142 - capable(CAP_SYS_RESOURCE)) {
38143 + capable_nolog(CAP_SYS_RESOURCE)) {
38144 if (free_blocks >= (nblocks + dirty_blocks))
38145 return 1;
38146 }
38147 diff -urNp linux-2.6.32.41/fs/ext4/ext4.h linux-2.6.32.41/fs/ext4/ext4.h
38148 --- linux-2.6.32.41/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38149 +++ linux-2.6.32.41/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38150 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38151
38152 /* stats for buddy allocator */
38153 spinlock_t s_mb_pa_lock;
38154 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38155 - atomic_t s_bal_success; /* we found long enough chunks */
38156 - atomic_t s_bal_allocated; /* in blocks */
38157 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38158 - atomic_t s_bal_goals; /* goal hits */
38159 - atomic_t s_bal_breaks; /* too long searches */
38160 - atomic_t s_bal_2orders; /* 2^order hits */
38161 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38162 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38163 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38164 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38165 + atomic_unchecked_t s_bal_goals; /* goal hits */
38166 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38167 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38168 spinlock_t s_bal_lock;
38169 unsigned long s_mb_buddies_generated;
38170 unsigned long long s_mb_generation_time;
38171 - atomic_t s_mb_lost_chunks;
38172 - atomic_t s_mb_preallocated;
38173 - atomic_t s_mb_discarded;
38174 + atomic_unchecked_t s_mb_lost_chunks;
38175 + atomic_unchecked_t s_mb_preallocated;
38176 + atomic_unchecked_t s_mb_discarded;
38177 atomic_t s_lock_busy;
38178
38179 /* locality groups */
38180 diff -urNp linux-2.6.32.41/fs/ext4/mballoc.c linux-2.6.32.41/fs/ext4/mballoc.c
38181 --- linux-2.6.32.41/fs/ext4/mballoc.c 2011-03-27 14:31:47.000000000 -0400
38182 +++ linux-2.6.32.41/fs/ext4/mballoc.c 2011-05-16 21:46:57.000000000 -0400
38183 @@ -1753,7 +1753,7 @@ void ext4_mb_simple_scan_group(struct ex
38184 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38185
38186 if (EXT4_SB(sb)->s_mb_stats)
38187 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38188 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38189
38190 break;
38191 }
38192 @@ -2129,7 +2129,7 @@ repeat:
38193 ac->ac_status = AC_STATUS_CONTINUE;
38194 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38195 cr = 3;
38196 - atomic_inc(&sbi->s_mb_lost_chunks);
38197 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38198 goto repeat;
38199 }
38200 }
38201 @@ -2172,6 +2172,8 @@ static int ext4_mb_seq_groups_show(struc
38202 ext4_grpblk_t counters[16];
38203 } sg;
38204
38205 + pax_track_stack();
38206 +
38207 group--;
38208 if (group == 0)
38209 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38210 @@ -2532,25 +2534,25 @@ int ext4_mb_release(struct super_block *
38211 if (sbi->s_mb_stats) {
38212 printk(KERN_INFO
38213 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38214 - atomic_read(&sbi->s_bal_allocated),
38215 - atomic_read(&sbi->s_bal_reqs),
38216 - atomic_read(&sbi->s_bal_success));
38217 + atomic_read_unchecked(&sbi->s_bal_allocated),
38218 + atomic_read_unchecked(&sbi->s_bal_reqs),
38219 + atomic_read_unchecked(&sbi->s_bal_success));
38220 printk(KERN_INFO
38221 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38222 "%u 2^N hits, %u breaks, %u lost\n",
38223 - atomic_read(&sbi->s_bal_ex_scanned),
38224 - atomic_read(&sbi->s_bal_goals),
38225 - atomic_read(&sbi->s_bal_2orders),
38226 - atomic_read(&sbi->s_bal_breaks),
38227 - atomic_read(&sbi->s_mb_lost_chunks));
38228 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38229 + atomic_read_unchecked(&sbi->s_bal_goals),
38230 + atomic_read_unchecked(&sbi->s_bal_2orders),
38231 + atomic_read_unchecked(&sbi->s_bal_breaks),
38232 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38233 printk(KERN_INFO
38234 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38235 sbi->s_mb_buddies_generated++,
38236 sbi->s_mb_generation_time);
38237 printk(KERN_INFO
38238 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38239 - atomic_read(&sbi->s_mb_preallocated),
38240 - atomic_read(&sbi->s_mb_discarded));
38241 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38242 + atomic_read_unchecked(&sbi->s_mb_discarded));
38243 }
38244
38245 free_percpu(sbi->s_locality_groups);
38246 @@ -3032,16 +3034,16 @@ static void ext4_mb_collect_stats(struct
38247 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38248
38249 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38250 - atomic_inc(&sbi->s_bal_reqs);
38251 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38252 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38253 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38254 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38255 - atomic_inc(&sbi->s_bal_success);
38256 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38257 + atomic_inc_unchecked(&sbi->s_bal_success);
38258 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38259 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38260 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38261 - atomic_inc(&sbi->s_bal_goals);
38262 + atomic_inc_unchecked(&sbi->s_bal_goals);
38263 if (ac->ac_found > sbi->s_mb_max_to_scan)
38264 - atomic_inc(&sbi->s_bal_breaks);
38265 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38266 }
38267
38268 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38269 @@ -3441,7 +3443,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38270 trace_ext4_mb_new_inode_pa(ac, pa);
38271
38272 ext4_mb_use_inode_pa(ac, pa);
38273 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38274 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38275
38276 ei = EXT4_I(ac->ac_inode);
38277 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38278 @@ -3501,7 +3503,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38279 trace_ext4_mb_new_group_pa(ac, pa);
38280
38281 ext4_mb_use_group_pa(ac, pa);
38282 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38283 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38284
38285 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38286 lg = ac->ac_lg;
38287 @@ -3605,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38288 * from the bitmap and continue.
38289 */
38290 }
38291 - atomic_add(free, &sbi->s_mb_discarded);
38292 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38293
38294 return err;
38295 }
38296 @@ -3624,7 +3626,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38297 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38298 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38299 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38300 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38301 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38302
38303 if (ac) {
38304 ac->ac_sb = sb;
38305 diff -urNp linux-2.6.32.41/fs/ext4/super.c linux-2.6.32.41/fs/ext4/super.c
38306 --- linux-2.6.32.41/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38307 +++ linux-2.6.32.41/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38308 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38309 }
38310
38311
38312 -static struct sysfs_ops ext4_attr_ops = {
38313 +static const struct sysfs_ops ext4_attr_ops = {
38314 .show = ext4_attr_show,
38315 .store = ext4_attr_store,
38316 };
38317 diff -urNp linux-2.6.32.41/fs/fcntl.c linux-2.6.32.41/fs/fcntl.c
38318 --- linux-2.6.32.41/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38319 +++ linux-2.6.32.41/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38320 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38321 if (err)
38322 return err;
38323
38324 + if (gr_handle_chroot_fowner(pid, type))
38325 + return -ENOENT;
38326 + if (gr_check_protected_task_fowner(pid, type))
38327 + return -EACCES;
38328 +
38329 f_modown(filp, pid, type, force);
38330 return 0;
38331 }
38332 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38333 switch (cmd) {
38334 case F_DUPFD:
38335 case F_DUPFD_CLOEXEC:
38336 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38337 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38338 break;
38339 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38340 diff -urNp linux-2.6.32.41/fs/fifo.c linux-2.6.32.41/fs/fifo.c
38341 --- linux-2.6.32.41/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38342 +++ linux-2.6.32.41/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38343 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38344 */
38345 filp->f_op = &read_pipefifo_fops;
38346 pipe->r_counter++;
38347 - if (pipe->readers++ == 0)
38348 + if (atomic_inc_return(&pipe->readers) == 1)
38349 wake_up_partner(inode);
38350
38351 - if (!pipe->writers) {
38352 + if (!atomic_read(&pipe->writers)) {
38353 if ((filp->f_flags & O_NONBLOCK)) {
38354 /* suppress POLLHUP until we have
38355 * seen a writer */
38356 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38357 * errno=ENXIO when there is no process reading the FIFO.
38358 */
38359 ret = -ENXIO;
38360 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38361 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38362 goto err;
38363
38364 filp->f_op = &write_pipefifo_fops;
38365 pipe->w_counter++;
38366 - if (!pipe->writers++)
38367 + if (atomic_inc_return(&pipe->writers) == 1)
38368 wake_up_partner(inode);
38369
38370 - if (!pipe->readers) {
38371 + if (!atomic_read(&pipe->readers)) {
38372 wait_for_partner(inode, &pipe->r_counter);
38373 if (signal_pending(current))
38374 goto err_wr;
38375 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38376 */
38377 filp->f_op = &rdwr_pipefifo_fops;
38378
38379 - pipe->readers++;
38380 - pipe->writers++;
38381 + atomic_inc(&pipe->readers);
38382 + atomic_inc(&pipe->writers);
38383 pipe->r_counter++;
38384 pipe->w_counter++;
38385 - if (pipe->readers == 1 || pipe->writers == 1)
38386 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38387 wake_up_partner(inode);
38388 break;
38389
38390 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38391 return 0;
38392
38393 err_rd:
38394 - if (!--pipe->readers)
38395 + if (atomic_dec_and_test(&pipe->readers))
38396 wake_up_interruptible(&pipe->wait);
38397 ret = -ERESTARTSYS;
38398 goto err;
38399
38400 err_wr:
38401 - if (!--pipe->writers)
38402 + if (atomic_dec_and_test(&pipe->writers))
38403 wake_up_interruptible(&pipe->wait);
38404 ret = -ERESTARTSYS;
38405 goto err;
38406
38407 err:
38408 - if (!pipe->readers && !pipe->writers)
38409 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38410 free_pipe_info(inode);
38411
38412 err_nocleanup:
38413 diff -urNp linux-2.6.32.41/fs/file.c linux-2.6.32.41/fs/file.c
38414 --- linux-2.6.32.41/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38415 +++ linux-2.6.32.41/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38416 @@ -14,6 +14,7 @@
38417 #include <linux/slab.h>
38418 #include <linux/vmalloc.h>
38419 #include <linux/file.h>
38420 +#include <linux/security.h>
38421 #include <linux/fdtable.h>
38422 #include <linux/bitops.h>
38423 #include <linux/interrupt.h>
38424 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38425 * N.B. For clone tasks sharing a files structure, this test
38426 * will limit the total number of files that can be opened.
38427 */
38428 +
38429 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38430 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38431 return -EMFILE;
38432
38433 diff -urNp linux-2.6.32.41/fs/filesystems.c linux-2.6.32.41/fs/filesystems.c
38434 --- linux-2.6.32.41/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38435 +++ linux-2.6.32.41/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38436 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38437 int len = dot ? dot - name : strlen(name);
38438
38439 fs = __get_fs_type(name, len);
38440 +
38441 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38442 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38443 +#else
38444 if (!fs && (request_module("%.*s", len, name) == 0))
38445 +#endif
38446 fs = __get_fs_type(name, len);
38447
38448 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38449 diff -urNp linux-2.6.32.41/fs/fscache/cookie.c linux-2.6.32.41/fs/fscache/cookie.c
38450 --- linux-2.6.32.41/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38451 +++ linux-2.6.32.41/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38452 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38453 parent ? (char *) parent->def->name : "<no-parent>",
38454 def->name, netfs_data);
38455
38456 - fscache_stat(&fscache_n_acquires);
38457 + fscache_stat_unchecked(&fscache_n_acquires);
38458
38459 /* if there's no parent cookie, then we don't create one here either */
38460 if (!parent) {
38461 - fscache_stat(&fscache_n_acquires_null);
38462 + fscache_stat_unchecked(&fscache_n_acquires_null);
38463 _leave(" [no parent]");
38464 return NULL;
38465 }
38466 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38467 /* allocate and initialise a cookie */
38468 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38469 if (!cookie) {
38470 - fscache_stat(&fscache_n_acquires_oom);
38471 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38472 _leave(" [ENOMEM]");
38473 return NULL;
38474 }
38475 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38476
38477 switch (cookie->def->type) {
38478 case FSCACHE_COOKIE_TYPE_INDEX:
38479 - fscache_stat(&fscache_n_cookie_index);
38480 + fscache_stat_unchecked(&fscache_n_cookie_index);
38481 break;
38482 case FSCACHE_COOKIE_TYPE_DATAFILE:
38483 - fscache_stat(&fscache_n_cookie_data);
38484 + fscache_stat_unchecked(&fscache_n_cookie_data);
38485 break;
38486 default:
38487 - fscache_stat(&fscache_n_cookie_special);
38488 + fscache_stat_unchecked(&fscache_n_cookie_special);
38489 break;
38490 }
38491
38492 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38493 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38494 atomic_dec(&parent->n_children);
38495 __fscache_cookie_put(cookie);
38496 - fscache_stat(&fscache_n_acquires_nobufs);
38497 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38498 _leave(" = NULL");
38499 return NULL;
38500 }
38501 }
38502
38503 - fscache_stat(&fscache_n_acquires_ok);
38504 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38505 _leave(" = %p", cookie);
38506 return cookie;
38507 }
38508 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38509 cache = fscache_select_cache_for_object(cookie->parent);
38510 if (!cache) {
38511 up_read(&fscache_addremove_sem);
38512 - fscache_stat(&fscache_n_acquires_no_cache);
38513 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38514 _leave(" = -ENOMEDIUM [no cache]");
38515 return -ENOMEDIUM;
38516 }
38517 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38518 object = cache->ops->alloc_object(cache, cookie);
38519 fscache_stat_d(&fscache_n_cop_alloc_object);
38520 if (IS_ERR(object)) {
38521 - fscache_stat(&fscache_n_object_no_alloc);
38522 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38523 ret = PTR_ERR(object);
38524 goto error;
38525 }
38526
38527 - fscache_stat(&fscache_n_object_alloc);
38528 + fscache_stat_unchecked(&fscache_n_object_alloc);
38529
38530 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38531
38532 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38533 struct fscache_object *object;
38534 struct hlist_node *_p;
38535
38536 - fscache_stat(&fscache_n_updates);
38537 + fscache_stat_unchecked(&fscache_n_updates);
38538
38539 if (!cookie) {
38540 - fscache_stat(&fscache_n_updates_null);
38541 + fscache_stat_unchecked(&fscache_n_updates_null);
38542 _leave(" [no cookie]");
38543 return;
38544 }
38545 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38546 struct fscache_object *object;
38547 unsigned long event;
38548
38549 - fscache_stat(&fscache_n_relinquishes);
38550 + fscache_stat_unchecked(&fscache_n_relinquishes);
38551 if (retire)
38552 - fscache_stat(&fscache_n_relinquishes_retire);
38553 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38554
38555 if (!cookie) {
38556 - fscache_stat(&fscache_n_relinquishes_null);
38557 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38558 _leave(" [no cookie]");
38559 return;
38560 }
38561 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38562
38563 /* wait for the cookie to finish being instantiated (or to fail) */
38564 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38565 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38566 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38567 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38568 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38569 }
38570 diff -urNp linux-2.6.32.41/fs/fscache/internal.h linux-2.6.32.41/fs/fscache/internal.h
38571 --- linux-2.6.32.41/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38572 +++ linux-2.6.32.41/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38573 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38574 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38575 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38576
38577 -extern atomic_t fscache_n_op_pend;
38578 -extern atomic_t fscache_n_op_run;
38579 -extern atomic_t fscache_n_op_enqueue;
38580 -extern atomic_t fscache_n_op_deferred_release;
38581 -extern atomic_t fscache_n_op_release;
38582 -extern atomic_t fscache_n_op_gc;
38583 -extern atomic_t fscache_n_op_cancelled;
38584 -extern atomic_t fscache_n_op_rejected;
38585 -
38586 -extern atomic_t fscache_n_attr_changed;
38587 -extern atomic_t fscache_n_attr_changed_ok;
38588 -extern atomic_t fscache_n_attr_changed_nobufs;
38589 -extern atomic_t fscache_n_attr_changed_nomem;
38590 -extern atomic_t fscache_n_attr_changed_calls;
38591 -
38592 -extern atomic_t fscache_n_allocs;
38593 -extern atomic_t fscache_n_allocs_ok;
38594 -extern atomic_t fscache_n_allocs_wait;
38595 -extern atomic_t fscache_n_allocs_nobufs;
38596 -extern atomic_t fscache_n_allocs_intr;
38597 -extern atomic_t fscache_n_allocs_object_dead;
38598 -extern atomic_t fscache_n_alloc_ops;
38599 -extern atomic_t fscache_n_alloc_op_waits;
38600 -
38601 -extern atomic_t fscache_n_retrievals;
38602 -extern atomic_t fscache_n_retrievals_ok;
38603 -extern atomic_t fscache_n_retrievals_wait;
38604 -extern atomic_t fscache_n_retrievals_nodata;
38605 -extern atomic_t fscache_n_retrievals_nobufs;
38606 -extern atomic_t fscache_n_retrievals_intr;
38607 -extern atomic_t fscache_n_retrievals_nomem;
38608 -extern atomic_t fscache_n_retrievals_object_dead;
38609 -extern atomic_t fscache_n_retrieval_ops;
38610 -extern atomic_t fscache_n_retrieval_op_waits;
38611 -
38612 -extern atomic_t fscache_n_stores;
38613 -extern atomic_t fscache_n_stores_ok;
38614 -extern atomic_t fscache_n_stores_again;
38615 -extern atomic_t fscache_n_stores_nobufs;
38616 -extern atomic_t fscache_n_stores_oom;
38617 -extern atomic_t fscache_n_store_ops;
38618 -extern atomic_t fscache_n_store_calls;
38619 -extern atomic_t fscache_n_store_pages;
38620 -extern atomic_t fscache_n_store_radix_deletes;
38621 -extern atomic_t fscache_n_store_pages_over_limit;
38622 -
38623 -extern atomic_t fscache_n_store_vmscan_not_storing;
38624 -extern atomic_t fscache_n_store_vmscan_gone;
38625 -extern atomic_t fscache_n_store_vmscan_busy;
38626 -extern atomic_t fscache_n_store_vmscan_cancelled;
38627 -
38628 -extern atomic_t fscache_n_marks;
38629 -extern atomic_t fscache_n_uncaches;
38630 -
38631 -extern atomic_t fscache_n_acquires;
38632 -extern atomic_t fscache_n_acquires_null;
38633 -extern atomic_t fscache_n_acquires_no_cache;
38634 -extern atomic_t fscache_n_acquires_ok;
38635 -extern atomic_t fscache_n_acquires_nobufs;
38636 -extern atomic_t fscache_n_acquires_oom;
38637 -
38638 -extern atomic_t fscache_n_updates;
38639 -extern atomic_t fscache_n_updates_null;
38640 -extern atomic_t fscache_n_updates_run;
38641 -
38642 -extern atomic_t fscache_n_relinquishes;
38643 -extern atomic_t fscache_n_relinquishes_null;
38644 -extern atomic_t fscache_n_relinquishes_waitcrt;
38645 -extern atomic_t fscache_n_relinquishes_retire;
38646 -
38647 -extern atomic_t fscache_n_cookie_index;
38648 -extern atomic_t fscache_n_cookie_data;
38649 -extern atomic_t fscache_n_cookie_special;
38650 -
38651 -extern atomic_t fscache_n_object_alloc;
38652 -extern atomic_t fscache_n_object_no_alloc;
38653 -extern atomic_t fscache_n_object_lookups;
38654 -extern atomic_t fscache_n_object_lookups_negative;
38655 -extern atomic_t fscache_n_object_lookups_positive;
38656 -extern atomic_t fscache_n_object_lookups_timed_out;
38657 -extern atomic_t fscache_n_object_created;
38658 -extern atomic_t fscache_n_object_avail;
38659 -extern atomic_t fscache_n_object_dead;
38660 -
38661 -extern atomic_t fscache_n_checkaux_none;
38662 -extern atomic_t fscache_n_checkaux_okay;
38663 -extern atomic_t fscache_n_checkaux_update;
38664 -extern atomic_t fscache_n_checkaux_obsolete;
38665 +extern atomic_unchecked_t fscache_n_op_pend;
38666 +extern atomic_unchecked_t fscache_n_op_run;
38667 +extern atomic_unchecked_t fscache_n_op_enqueue;
38668 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38669 +extern atomic_unchecked_t fscache_n_op_release;
38670 +extern atomic_unchecked_t fscache_n_op_gc;
38671 +extern atomic_unchecked_t fscache_n_op_cancelled;
38672 +extern atomic_unchecked_t fscache_n_op_rejected;
38673 +
38674 +extern atomic_unchecked_t fscache_n_attr_changed;
38675 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
38676 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38677 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38678 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
38679 +
38680 +extern atomic_unchecked_t fscache_n_allocs;
38681 +extern atomic_unchecked_t fscache_n_allocs_ok;
38682 +extern atomic_unchecked_t fscache_n_allocs_wait;
38683 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
38684 +extern atomic_unchecked_t fscache_n_allocs_intr;
38685 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
38686 +extern atomic_unchecked_t fscache_n_alloc_ops;
38687 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
38688 +
38689 +extern atomic_unchecked_t fscache_n_retrievals;
38690 +extern atomic_unchecked_t fscache_n_retrievals_ok;
38691 +extern atomic_unchecked_t fscache_n_retrievals_wait;
38692 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
38693 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38694 +extern atomic_unchecked_t fscache_n_retrievals_intr;
38695 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
38696 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38697 +extern atomic_unchecked_t fscache_n_retrieval_ops;
38698 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38699 +
38700 +extern atomic_unchecked_t fscache_n_stores;
38701 +extern atomic_unchecked_t fscache_n_stores_ok;
38702 +extern atomic_unchecked_t fscache_n_stores_again;
38703 +extern atomic_unchecked_t fscache_n_stores_nobufs;
38704 +extern atomic_unchecked_t fscache_n_stores_oom;
38705 +extern atomic_unchecked_t fscache_n_store_ops;
38706 +extern atomic_unchecked_t fscache_n_store_calls;
38707 +extern atomic_unchecked_t fscache_n_store_pages;
38708 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
38709 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38710 +
38711 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38712 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38713 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38714 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38715 +
38716 +extern atomic_unchecked_t fscache_n_marks;
38717 +extern atomic_unchecked_t fscache_n_uncaches;
38718 +
38719 +extern atomic_unchecked_t fscache_n_acquires;
38720 +extern atomic_unchecked_t fscache_n_acquires_null;
38721 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
38722 +extern atomic_unchecked_t fscache_n_acquires_ok;
38723 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
38724 +extern atomic_unchecked_t fscache_n_acquires_oom;
38725 +
38726 +extern atomic_unchecked_t fscache_n_updates;
38727 +extern atomic_unchecked_t fscache_n_updates_null;
38728 +extern atomic_unchecked_t fscache_n_updates_run;
38729 +
38730 +extern atomic_unchecked_t fscache_n_relinquishes;
38731 +extern atomic_unchecked_t fscache_n_relinquishes_null;
38732 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38733 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
38734 +
38735 +extern atomic_unchecked_t fscache_n_cookie_index;
38736 +extern atomic_unchecked_t fscache_n_cookie_data;
38737 +extern atomic_unchecked_t fscache_n_cookie_special;
38738 +
38739 +extern atomic_unchecked_t fscache_n_object_alloc;
38740 +extern atomic_unchecked_t fscache_n_object_no_alloc;
38741 +extern atomic_unchecked_t fscache_n_object_lookups;
38742 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
38743 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
38744 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38745 +extern atomic_unchecked_t fscache_n_object_created;
38746 +extern atomic_unchecked_t fscache_n_object_avail;
38747 +extern atomic_unchecked_t fscache_n_object_dead;
38748 +
38749 +extern atomic_unchecked_t fscache_n_checkaux_none;
38750 +extern atomic_unchecked_t fscache_n_checkaux_okay;
38751 +extern atomic_unchecked_t fscache_n_checkaux_update;
38752 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38753
38754 extern atomic_t fscache_n_cop_alloc_object;
38755 extern atomic_t fscache_n_cop_lookup_object;
38756 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38757 atomic_inc(stat);
38758 }
38759
38760 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38761 +{
38762 + atomic_inc_unchecked(stat);
38763 +}
38764 +
38765 static inline void fscache_stat_d(atomic_t *stat)
38766 {
38767 atomic_dec(stat);
38768 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
38769
38770 #define __fscache_stat(stat) (NULL)
38771 #define fscache_stat(stat) do {} while (0)
38772 +#define fscache_stat_unchecked(stat) do {} while (0)
38773 #define fscache_stat_d(stat) do {} while (0)
38774 #endif
38775
38776 diff -urNp linux-2.6.32.41/fs/fscache/object.c linux-2.6.32.41/fs/fscache/object.c
38777 --- linux-2.6.32.41/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38778 +++ linux-2.6.32.41/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38779 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
38780 /* update the object metadata on disk */
38781 case FSCACHE_OBJECT_UPDATING:
38782 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38783 - fscache_stat(&fscache_n_updates_run);
38784 + fscache_stat_unchecked(&fscache_n_updates_run);
38785 fscache_stat(&fscache_n_cop_update_object);
38786 object->cache->ops->update_object(object);
38787 fscache_stat_d(&fscache_n_cop_update_object);
38788 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
38789 spin_lock(&object->lock);
38790 object->state = FSCACHE_OBJECT_DEAD;
38791 spin_unlock(&object->lock);
38792 - fscache_stat(&fscache_n_object_dead);
38793 + fscache_stat_unchecked(&fscache_n_object_dead);
38794 goto terminal_transit;
38795
38796 /* handle the parent cache of this object being withdrawn from
38797 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
38798 spin_lock(&object->lock);
38799 object->state = FSCACHE_OBJECT_DEAD;
38800 spin_unlock(&object->lock);
38801 - fscache_stat(&fscache_n_object_dead);
38802 + fscache_stat_unchecked(&fscache_n_object_dead);
38803 goto terminal_transit;
38804
38805 /* complain about the object being woken up once it is
38806 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
38807 parent->cookie->def->name, cookie->def->name,
38808 object->cache->tag->name);
38809
38810 - fscache_stat(&fscache_n_object_lookups);
38811 + fscache_stat_unchecked(&fscache_n_object_lookups);
38812 fscache_stat(&fscache_n_cop_lookup_object);
38813 ret = object->cache->ops->lookup_object(object);
38814 fscache_stat_d(&fscache_n_cop_lookup_object);
38815 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
38816 if (ret == -ETIMEDOUT) {
38817 /* probably stuck behind another object, so move this one to
38818 * the back of the queue */
38819 - fscache_stat(&fscache_n_object_lookups_timed_out);
38820 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
38821 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38822 }
38823
38824 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
38825
38826 spin_lock(&object->lock);
38827 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38828 - fscache_stat(&fscache_n_object_lookups_negative);
38829 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
38830
38831 /* transit here to allow write requests to begin stacking up
38832 * and read requests to begin returning ENODATA */
38833 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
38834 * result, in which case there may be data available */
38835 spin_lock(&object->lock);
38836 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38837 - fscache_stat(&fscache_n_object_lookups_positive);
38838 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
38839
38840 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
38841
38842 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
38843 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38844 } else {
38845 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
38846 - fscache_stat(&fscache_n_object_created);
38847 + fscache_stat_unchecked(&fscache_n_object_created);
38848
38849 object->state = FSCACHE_OBJECT_AVAILABLE;
38850 spin_unlock(&object->lock);
38851 @@ -633,7 +633,7 @@ static void fscache_object_available(str
38852 fscache_enqueue_dependents(object);
38853
38854 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
38855 - fscache_stat(&fscache_n_object_avail);
38856 + fscache_stat_unchecked(&fscache_n_object_avail);
38857
38858 _leave("");
38859 }
38860 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
38861 enum fscache_checkaux result;
38862
38863 if (!object->cookie->def->check_aux) {
38864 - fscache_stat(&fscache_n_checkaux_none);
38865 + fscache_stat_unchecked(&fscache_n_checkaux_none);
38866 return FSCACHE_CHECKAUX_OKAY;
38867 }
38868
38869 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
38870 switch (result) {
38871 /* entry okay as is */
38872 case FSCACHE_CHECKAUX_OKAY:
38873 - fscache_stat(&fscache_n_checkaux_okay);
38874 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
38875 break;
38876
38877 /* entry requires update */
38878 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
38879 - fscache_stat(&fscache_n_checkaux_update);
38880 + fscache_stat_unchecked(&fscache_n_checkaux_update);
38881 break;
38882
38883 /* entry requires deletion */
38884 case FSCACHE_CHECKAUX_OBSOLETE:
38885 - fscache_stat(&fscache_n_checkaux_obsolete);
38886 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
38887 break;
38888
38889 default:
38890 diff -urNp linux-2.6.32.41/fs/fscache/operation.c linux-2.6.32.41/fs/fscache/operation.c
38891 --- linux-2.6.32.41/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
38892 +++ linux-2.6.32.41/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
38893 @@ -16,7 +16,7 @@
38894 #include <linux/seq_file.h>
38895 #include "internal.h"
38896
38897 -atomic_t fscache_op_debug_id;
38898 +atomic_unchecked_t fscache_op_debug_id;
38899 EXPORT_SYMBOL(fscache_op_debug_id);
38900
38901 /**
38902 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
38903 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
38904 ASSERTCMP(atomic_read(&op->usage), >, 0);
38905
38906 - fscache_stat(&fscache_n_op_enqueue);
38907 + fscache_stat_unchecked(&fscache_n_op_enqueue);
38908 switch (op->flags & FSCACHE_OP_TYPE) {
38909 case FSCACHE_OP_FAST:
38910 _debug("queue fast");
38911 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
38912 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
38913 if (op->processor)
38914 fscache_enqueue_operation(op);
38915 - fscache_stat(&fscache_n_op_run);
38916 + fscache_stat_unchecked(&fscache_n_op_run);
38917 }
38918
38919 /*
38920 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
38921 if (object->n_ops > 0) {
38922 atomic_inc(&op->usage);
38923 list_add_tail(&op->pend_link, &object->pending_ops);
38924 - fscache_stat(&fscache_n_op_pend);
38925 + fscache_stat_unchecked(&fscache_n_op_pend);
38926 } else if (!list_empty(&object->pending_ops)) {
38927 atomic_inc(&op->usage);
38928 list_add_tail(&op->pend_link, &object->pending_ops);
38929 - fscache_stat(&fscache_n_op_pend);
38930 + fscache_stat_unchecked(&fscache_n_op_pend);
38931 fscache_start_operations(object);
38932 } else {
38933 ASSERTCMP(object->n_in_progress, ==, 0);
38934 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
38935 object->n_exclusive++; /* reads and writes must wait */
38936 atomic_inc(&op->usage);
38937 list_add_tail(&op->pend_link, &object->pending_ops);
38938 - fscache_stat(&fscache_n_op_pend);
38939 + fscache_stat_unchecked(&fscache_n_op_pend);
38940 ret = 0;
38941 } else {
38942 /* not allowed to submit ops in any other state */
38943 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
38944 if (object->n_exclusive > 0) {
38945 atomic_inc(&op->usage);
38946 list_add_tail(&op->pend_link, &object->pending_ops);
38947 - fscache_stat(&fscache_n_op_pend);
38948 + fscache_stat_unchecked(&fscache_n_op_pend);
38949 } else if (!list_empty(&object->pending_ops)) {
38950 atomic_inc(&op->usage);
38951 list_add_tail(&op->pend_link, &object->pending_ops);
38952 - fscache_stat(&fscache_n_op_pend);
38953 + fscache_stat_unchecked(&fscache_n_op_pend);
38954 fscache_start_operations(object);
38955 } else {
38956 ASSERTCMP(object->n_exclusive, ==, 0);
38957 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
38958 object->n_ops++;
38959 atomic_inc(&op->usage);
38960 list_add_tail(&op->pend_link, &object->pending_ops);
38961 - fscache_stat(&fscache_n_op_pend);
38962 + fscache_stat_unchecked(&fscache_n_op_pend);
38963 ret = 0;
38964 } else if (object->state == FSCACHE_OBJECT_DYING ||
38965 object->state == FSCACHE_OBJECT_LC_DYING ||
38966 object->state == FSCACHE_OBJECT_WITHDRAWING) {
38967 - fscache_stat(&fscache_n_op_rejected);
38968 + fscache_stat_unchecked(&fscache_n_op_rejected);
38969 ret = -ENOBUFS;
38970 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
38971 fscache_report_unexpected_submission(object, op, ostate);
38972 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
38973
38974 ret = -EBUSY;
38975 if (!list_empty(&op->pend_link)) {
38976 - fscache_stat(&fscache_n_op_cancelled);
38977 + fscache_stat_unchecked(&fscache_n_op_cancelled);
38978 list_del_init(&op->pend_link);
38979 object->n_ops--;
38980 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
38981 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
38982 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
38983 BUG();
38984
38985 - fscache_stat(&fscache_n_op_release);
38986 + fscache_stat_unchecked(&fscache_n_op_release);
38987
38988 if (op->release) {
38989 op->release(op);
38990 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
38991 * lock, and defer it otherwise */
38992 if (!spin_trylock(&object->lock)) {
38993 _debug("defer put");
38994 - fscache_stat(&fscache_n_op_deferred_release);
38995 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
38996
38997 cache = object->cache;
38998 spin_lock(&cache->op_gc_list_lock);
38999 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39000
39001 _debug("GC DEFERRED REL OBJ%x OP%x",
39002 object->debug_id, op->debug_id);
39003 - fscache_stat(&fscache_n_op_gc);
39004 + fscache_stat_unchecked(&fscache_n_op_gc);
39005
39006 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39007
39008 diff -urNp linux-2.6.32.41/fs/fscache/page.c linux-2.6.32.41/fs/fscache/page.c
39009 --- linux-2.6.32.41/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39010 +++ linux-2.6.32.41/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39011 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39012 val = radix_tree_lookup(&cookie->stores, page->index);
39013 if (!val) {
39014 rcu_read_unlock();
39015 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39016 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39017 __fscache_uncache_page(cookie, page);
39018 return true;
39019 }
39020 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39021 spin_unlock(&cookie->stores_lock);
39022
39023 if (xpage) {
39024 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39025 - fscache_stat(&fscache_n_store_radix_deletes);
39026 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39027 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39028 ASSERTCMP(xpage, ==, page);
39029 } else {
39030 - fscache_stat(&fscache_n_store_vmscan_gone);
39031 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39032 }
39033
39034 wake_up_bit(&cookie->flags, 0);
39035 @@ -106,7 +106,7 @@ page_busy:
39036 /* we might want to wait here, but that could deadlock the allocator as
39037 * the slow-work threads writing to the cache may all end up sleeping
39038 * on memory allocation */
39039 - fscache_stat(&fscache_n_store_vmscan_busy);
39040 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39041 return false;
39042 }
39043 EXPORT_SYMBOL(__fscache_maybe_release_page);
39044 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39045 FSCACHE_COOKIE_STORING_TAG);
39046 if (!radix_tree_tag_get(&cookie->stores, page->index,
39047 FSCACHE_COOKIE_PENDING_TAG)) {
39048 - fscache_stat(&fscache_n_store_radix_deletes);
39049 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39050 xpage = radix_tree_delete(&cookie->stores, page->index);
39051 }
39052 spin_unlock(&cookie->stores_lock);
39053 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39054
39055 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39056
39057 - fscache_stat(&fscache_n_attr_changed_calls);
39058 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39059
39060 if (fscache_object_is_active(object)) {
39061 fscache_set_op_state(op, "CallFS");
39062 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39063
39064 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39065
39066 - fscache_stat(&fscache_n_attr_changed);
39067 + fscache_stat_unchecked(&fscache_n_attr_changed);
39068
39069 op = kzalloc(sizeof(*op), GFP_KERNEL);
39070 if (!op) {
39071 - fscache_stat(&fscache_n_attr_changed_nomem);
39072 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39073 _leave(" = -ENOMEM");
39074 return -ENOMEM;
39075 }
39076 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39077 if (fscache_submit_exclusive_op(object, op) < 0)
39078 goto nobufs;
39079 spin_unlock(&cookie->lock);
39080 - fscache_stat(&fscache_n_attr_changed_ok);
39081 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39082 fscache_put_operation(op);
39083 _leave(" = 0");
39084 return 0;
39085 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39086 nobufs:
39087 spin_unlock(&cookie->lock);
39088 kfree(op);
39089 - fscache_stat(&fscache_n_attr_changed_nobufs);
39090 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39091 _leave(" = %d", -ENOBUFS);
39092 return -ENOBUFS;
39093 }
39094 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39095 /* allocate a retrieval operation and attempt to submit it */
39096 op = kzalloc(sizeof(*op), GFP_NOIO);
39097 if (!op) {
39098 - fscache_stat(&fscache_n_retrievals_nomem);
39099 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39100 return NULL;
39101 }
39102
39103 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39104 return 0;
39105 }
39106
39107 - fscache_stat(&fscache_n_retrievals_wait);
39108 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39109
39110 jif = jiffies;
39111 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39112 fscache_wait_bit_interruptible,
39113 TASK_INTERRUPTIBLE) != 0) {
39114 - fscache_stat(&fscache_n_retrievals_intr);
39115 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39116 _leave(" = -ERESTARTSYS");
39117 return -ERESTARTSYS;
39118 }
39119 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39120 */
39121 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39122 struct fscache_retrieval *op,
39123 - atomic_t *stat_op_waits,
39124 - atomic_t *stat_object_dead)
39125 + atomic_unchecked_t *stat_op_waits,
39126 + atomic_unchecked_t *stat_object_dead)
39127 {
39128 int ret;
39129
39130 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39131 goto check_if_dead;
39132
39133 _debug(">>> WT");
39134 - fscache_stat(stat_op_waits);
39135 + fscache_stat_unchecked(stat_op_waits);
39136 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39137 fscache_wait_bit_interruptible,
39138 TASK_INTERRUPTIBLE) < 0) {
39139 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39140
39141 check_if_dead:
39142 if (unlikely(fscache_object_is_dead(object))) {
39143 - fscache_stat(stat_object_dead);
39144 + fscache_stat_unchecked(stat_object_dead);
39145 return -ENOBUFS;
39146 }
39147 return 0;
39148 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39149
39150 _enter("%p,%p,,,", cookie, page);
39151
39152 - fscache_stat(&fscache_n_retrievals);
39153 + fscache_stat_unchecked(&fscache_n_retrievals);
39154
39155 if (hlist_empty(&cookie->backing_objects))
39156 goto nobufs;
39157 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39158 goto nobufs_unlock;
39159 spin_unlock(&cookie->lock);
39160
39161 - fscache_stat(&fscache_n_retrieval_ops);
39162 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39163
39164 /* pin the netfs read context in case we need to do the actual netfs
39165 * read because we've encountered a cache read failure */
39166 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39167
39168 error:
39169 if (ret == -ENOMEM)
39170 - fscache_stat(&fscache_n_retrievals_nomem);
39171 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39172 else if (ret == -ERESTARTSYS)
39173 - fscache_stat(&fscache_n_retrievals_intr);
39174 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39175 else if (ret == -ENODATA)
39176 - fscache_stat(&fscache_n_retrievals_nodata);
39177 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39178 else if (ret < 0)
39179 - fscache_stat(&fscache_n_retrievals_nobufs);
39180 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39181 else
39182 - fscache_stat(&fscache_n_retrievals_ok);
39183 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39184
39185 fscache_put_retrieval(op);
39186 _leave(" = %d", ret);
39187 @@ -453,7 +453,7 @@ nobufs_unlock:
39188 spin_unlock(&cookie->lock);
39189 kfree(op);
39190 nobufs:
39191 - fscache_stat(&fscache_n_retrievals_nobufs);
39192 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39193 _leave(" = -ENOBUFS");
39194 return -ENOBUFS;
39195 }
39196 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39197
39198 _enter("%p,,%d,,,", cookie, *nr_pages);
39199
39200 - fscache_stat(&fscache_n_retrievals);
39201 + fscache_stat_unchecked(&fscache_n_retrievals);
39202
39203 if (hlist_empty(&cookie->backing_objects))
39204 goto nobufs;
39205 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39206 goto nobufs_unlock;
39207 spin_unlock(&cookie->lock);
39208
39209 - fscache_stat(&fscache_n_retrieval_ops);
39210 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39211
39212 /* pin the netfs read context in case we need to do the actual netfs
39213 * read because we've encountered a cache read failure */
39214 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39215
39216 error:
39217 if (ret == -ENOMEM)
39218 - fscache_stat(&fscache_n_retrievals_nomem);
39219 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39220 else if (ret == -ERESTARTSYS)
39221 - fscache_stat(&fscache_n_retrievals_intr);
39222 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39223 else if (ret == -ENODATA)
39224 - fscache_stat(&fscache_n_retrievals_nodata);
39225 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39226 else if (ret < 0)
39227 - fscache_stat(&fscache_n_retrievals_nobufs);
39228 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39229 else
39230 - fscache_stat(&fscache_n_retrievals_ok);
39231 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39232
39233 fscache_put_retrieval(op);
39234 _leave(" = %d", ret);
39235 @@ -570,7 +570,7 @@ nobufs_unlock:
39236 spin_unlock(&cookie->lock);
39237 kfree(op);
39238 nobufs:
39239 - fscache_stat(&fscache_n_retrievals_nobufs);
39240 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39241 _leave(" = -ENOBUFS");
39242 return -ENOBUFS;
39243 }
39244 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39245
39246 _enter("%p,%p,,,", cookie, page);
39247
39248 - fscache_stat(&fscache_n_allocs);
39249 + fscache_stat_unchecked(&fscache_n_allocs);
39250
39251 if (hlist_empty(&cookie->backing_objects))
39252 goto nobufs;
39253 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39254 goto nobufs_unlock;
39255 spin_unlock(&cookie->lock);
39256
39257 - fscache_stat(&fscache_n_alloc_ops);
39258 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39259
39260 ret = fscache_wait_for_retrieval_activation(
39261 object, op,
39262 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39263
39264 error:
39265 if (ret == -ERESTARTSYS)
39266 - fscache_stat(&fscache_n_allocs_intr);
39267 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39268 else if (ret < 0)
39269 - fscache_stat(&fscache_n_allocs_nobufs);
39270 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39271 else
39272 - fscache_stat(&fscache_n_allocs_ok);
39273 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39274
39275 fscache_put_retrieval(op);
39276 _leave(" = %d", ret);
39277 @@ -651,7 +651,7 @@ nobufs_unlock:
39278 spin_unlock(&cookie->lock);
39279 kfree(op);
39280 nobufs:
39281 - fscache_stat(&fscache_n_allocs_nobufs);
39282 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39283 _leave(" = -ENOBUFS");
39284 return -ENOBUFS;
39285 }
39286 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39287
39288 spin_lock(&cookie->stores_lock);
39289
39290 - fscache_stat(&fscache_n_store_calls);
39291 + fscache_stat_unchecked(&fscache_n_store_calls);
39292
39293 /* find a page to store */
39294 page = NULL;
39295 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39296 page = results[0];
39297 _debug("gang %d [%lx]", n, page->index);
39298 if (page->index > op->store_limit) {
39299 - fscache_stat(&fscache_n_store_pages_over_limit);
39300 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39301 goto superseded;
39302 }
39303
39304 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39305
39306 if (page) {
39307 fscache_set_op_state(&op->op, "Store");
39308 - fscache_stat(&fscache_n_store_pages);
39309 + fscache_stat_unchecked(&fscache_n_store_pages);
39310 fscache_stat(&fscache_n_cop_write_page);
39311 ret = object->cache->ops->write_page(op, page);
39312 fscache_stat_d(&fscache_n_cop_write_page);
39313 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39314 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39315 ASSERT(PageFsCache(page));
39316
39317 - fscache_stat(&fscache_n_stores);
39318 + fscache_stat_unchecked(&fscache_n_stores);
39319
39320 op = kzalloc(sizeof(*op), GFP_NOIO);
39321 if (!op)
39322 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39323 spin_unlock(&cookie->stores_lock);
39324 spin_unlock(&object->lock);
39325
39326 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39327 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39328 op->store_limit = object->store_limit;
39329
39330 if (fscache_submit_op(object, &op->op) < 0)
39331 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39332
39333 spin_unlock(&cookie->lock);
39334 radix_tree_preload_end();
39335 - fscache_stat(&fscache_n_store_ops);
39336 - fscache_stat(&fscache_n_stores_ok);
39337 + fscache_stat_unchecked(&fscache_n_store_ops);
39338 + fscache_stat_unchecked(&fscache_n_stores_ok);
39339
39340 /* the slow work queue now carries its own ref on the object */
39341 fscache_put_operation(&op->op);
39342 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39343 return 0;
39344
39345 already_queued:
39346 - fscache_stat(&fscache_n_stores_again);
39347 + fscache_stat_unchecked(&fscache_n_stores_again);
39348 already_pending:
39349 spin_unlock(&cookie->stores_lock);
39350 spin_unlock(&object->lock);
39351 spin_unlock(&cookie->lock);
39352 radix_tree_preload_end();
39353 kfree(op);
39354 - fscache_stat(&fscache_n_stores_ok);
39355 + fscache_stat_unchecked(&fscache_n_stores_ok);
39356 _leave(" = 0");
39357 return 0;
39358
39359 @@ -886,14 +886,14 @@ nobufs:
39360 spin_unlock(&cookie->lock);
39361 radix_tree_preload_end();
39362 kfree(op);
39363 - fscache_stat(&fscache_n_stores_nobufs);
39364 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39365 _leave(" = -ENOBUFS");
39366 return -ENOBUFS;
39367
39368 nomem_free:
39369 kfree(op);
39370 nomem:
39371 - fscache_stat(&fscache_n_stores_oom);
39372 + fscache_stat_unchecked(&fscache_n_stores_oom);
39373 _leave(" = -ENOMEM");
39374 return -ENOMEM;
39375 }
39376 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39377 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39378 ASSERTCMP(page, !=, NULL);
39379
39380 - fscache_stat(&fscache_n_uncaches);
39381 + fscache_stat_unchecked(&fscache_n_uncaches);
39382
39383 /* cache withdrawal may beat us to it */
39384 if (!PageFsCache(page))
39385 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39386 unsigned long loop;
39387
39388 #ifdef CONFIG_FSCACHE_STATS
39389 - atomic_add(pagevec->nr, &fscache_n_marks);
39390 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39391 #endif
39392
39393 for (loop = 0; loop < pagevec->nr; loop++) {
39394 diff -urNp linux-2.6.32.41/fs/fscache/stats.c linux-2.6.32.41/fs/fscache/stats.c
39395 --- linux-2.6.32.41/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39396 +++ linux-2.6.32.41/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39397 @@ -18,95 +18,95 @@
39398 /*
39399 * operation counters
39400 */
39401 -atomic_t fscache_n_op_pend;
39402 -atomic_t fscache_n_op_run;
39403 -atomic_t fscache_n_op_enqueue;
39404 -atomic_t fscache_n_op_requeue;
39405 -atomic_t fscache_n_op_deferred_release;
39406 -atomic_t fscache_n_op_release;
39407 -atomic_t fscache_n_op_gc;
39408 -atomic_t fscache_n_op_cancelled;
39409 -atomic_t fscache_n_op_rejected;
39410 -
39411 -atomic_t fscache_n_attr_changed;
39412 -atomic_t fscache_n_attr_changed_ok;
39413 -atomic_t fscache_n_attr_changed_nobufs;
39414 -atomic_t fscache_n_attr_changed_nomem;
39415 -atomic_t fscache_n_attr_changed_calls;
39416 -
39417 -atomic_t fscache_n_allocs;
39418 -atomic_t fscache_n_allocs_ok;
39419 -atomic_t fscache_n_allocs_wait;
39420 -atomic_t fscache_n_allocs_nobufs;
39421 -atomic_t fscache_n_allocs_intr;
39422 -atomic_t fscache_n_allocs_object_dead;
39423 -atomic_t fscache_n_alloc_ops;
39424 -atomic_t fscache_n_alloc_op_waits;
39425 -
39426 -atomic_t fscache_n_retrievals;
39427 -atomic_t fscache_n_retrievals_ok;
39428 -atomic_t fscache_n_retrievals_wait;
39429 -atomic_t fscache_n_retrievals_nodata;
39430 -atomic_t fscache_n_retrievals_nobufs;
39431 -atomic_t fscache_n_retrievals_intr;
39432 -atomic_t fscache_n_retrievals_nomem;
39433 -atomic_t fscache_n_retrievals_object_dead;
39434 -atomic_t fscache_n_retrieval_ops;
39435 -atomic_t fscache_n_retrieval_op_waits;
39436 -
39437 -atomic_t fscache_n_stores;
39438 -atomic_t fscache_n_stores_ok;
39439 -atomic_t fscache_n_stores_again;
39440 -atomic_t fscache_n_stores_nobufs;
39441 -atomic_t fscache_n_stores_oom;
39442 -atomic_t fscache_n_store_ops;
39443 -atomic_t fscache_n_store_calls;
39444 -atomic_t fscache_n_store_pages;
39445 -atomic_t fscache_n_store_radix_deletes;
39446 -atomic_t fscache_n_store_pages_over_limit;
39447 -
39448 -atomic_t fscache_n_store_vmscan_not_storing;
39449 -atomic_t fscache_n_store_vmscan_gone;
39450 -atomic_t fscache_n_store_vmscan_busy;
39451 -atomic_t fscache_n_store_vmscan_cancelled;
39452 -
39453 -atomic_t fscache_n_marks;
39454 -atomic_t fscache_n_uncaches;
39455 -
39456 -atomic_t fscache_n_acquires;
39457 -atomic_t fscache_n_acquires_null;
39458 -atomic_t fscache_n_acquires_no_cache;
39459 -atomic_t fscache_n_acquires_ok;
39460 -atomic_t fscache_n_acquires_nobufs;
39461 -atomic_t fscache_n_acquires_oom;
39462 -
39463 -atomic_t fscache_n_updates;
39464 -atomic_t fscache_n_updates_null;
39465 -atomic_t fscache_n_updates_run;
39466 -
39467 -atomic_t fscache_n_relinquishes;
39468 -atomic_t fscache_n_relinquishes_null;
39469 -atomic_t fscache_n_relinquishes_waitcrt;
39470 -atomic_t fscache_n_relinquishes_retire;
39471 -
39472 -atomic_t fscache_n_cookie_index;
39473 -atomic_t fscache_n_cookie_data;
39474 -atomic_t fscache_n_cookie_special;
39475 -
39476 -atomic_t fscache_n_object_alloc;
39477 -atomic_t fscache_n_object_no_alloc;
39478 -atomic_t fscache_n_object_lookups;
39479 -atomic_t fscache_n_object_lookups_negative;
39480 -atomic_t fscache_n_object_lookups_positive;
39481 -atomic_t fscache_n_object_lookups_timed_out;
39482 -atomic_t fscache_n_object_created;
39483 -atomic_t fscache_n_object_avail;
39484 -atomic_t fscache_n_object_dead;
39485 -
39486 -atomic_t fscache_n_checkaux_none;
39487 -atomic_t fscache_n_checkaux_okay;
39488 -atomic_t fscache_n_checkaux_update;
39489 -atomic_t fscache_n_checkaux_obsolete;
39490 +atomic_unchecked_t fscache_n_op_pend;
39491 +atomic_unchecked_t fscache_n_op_run;
39492 +atomic_unchecked_t fscache_n_op_enqueue;
39493 +atomic_unchecked_t fscache_n_op_requeue;
39494 +atomic_unchecked_t fscache_n_op_deferred_release;
39495 +atomic_unchecked_t fscache_n_op_release;
39496 +atomic_unchecked_t fscache_n_op_gc;
39497 +atomic_unchecked_t fscache_n_op_cancelled;
39498 +atomic_unchecked_t fscache_n_op_rejected;
39499 +
39500 +atomic_unchecked_t fscache_n_attr_changed;
39501 +atomic_unchecked_t fscache_n_attr_changed_ok;
39502 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39503 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39504 +atomic_unchecked_t fscache_n_attr_changed_calls;
39505 +
39506 +atomic_unchecked_t fscache_n_allocs;
39507 +atomic_unchecked_t fscache_n_allocs_ok;
39508 +atomic_unchecked_t fscache_n_allocs_wait;
39509 +atomic_unchecked_t fscache_n_allocs_nobufs;
39510 +atomic_unchecked_t fscache_n_allocs_intr;
39511 +atomic_unchecked_t fscache_n_allocs_object_dead;
39512 +atomic_unchecked_t fscache_n_alloc_ops;
39513 +atomic_unchecked_t fscache_n_alloc_op_waits;
39514 +
39515 +atomic_unchecked_t fscache_n_retrievals;
39516 +atomic_unchecked_t fscache_n_retrievals_ok;
39517 +atomic_unchecked_t fscache_n_retrievals_wait;
39518 +atomic_unchecked_t fscache_n_retrievals_nodata;
39519 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39520 +atomic_unchecked_t fscache_n_retrievals_intr;
39521 +atomic_unchecked_t fscache_n_retrievals_nomem;
39522 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39523 +atomic_unchecked_t fscache_n_retrieval_ops;
39524 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39525 +
39526 +atomic_unchecked_t fscache_n_stores;
39527 +atomic_unchecked_t fscache_n_stores_ok;
39528 +atomic_unchecked_t fscache_n_stores_again;
39529 +atomic_unchecked_t fscache_n_stores_nobufs;
39530 +atomic_unchecked_t fscache_n_stores_oom;
39531 +atomic_unchecked_t fscache_n_store_ops;
39532 +atomic_unchecked_t fscache_n_store_calls;
39533 +atomic_unchecked_t fscache_n_store_pages;
39534 +atomic_unchecked_t fscache_n_store_radix_deletes;
39535 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39536 +
39537 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39538 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39539 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39540 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39541 +
39542 +atomic_unchecked_t fscache_n_marks;
39543 +atomic_unchecked_t fscache_n_uncaches;
39544 +
39545 +atomic_unchecked_t fscache_n_acquires;
39546 +atomic_unchecked_t fscache_n_acquires_null;
39547 +atomic_unchecked_t fscache_n_acquires_no_cache;
39548 +atomic_unchecked_t fscache_n_acquires_ok;
39549 +atomic_unchecked_t fscache_n_acquires_nobufs;
39550 +atomic_unchecked_t fscache_n_acquires_oom;
39551 +
39552 +atomic_unchecked_t fscache_n_updates;
39553 +atomic_unchecked_t fscache_n_updates_null;
39554 +atomic_unchecked_t fscache_n_updates_run;
39555 +
39556 +atomic_unchecked_t fscache_n_relinquishes;
39557 +atomic_unchecked_t fscache_n_relinquishes_null;
39558 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39559 +atomic_unchecked_t fscache_n_relinquishes_retire;
39560 +
39561 +atomic_unchecked_t fscache_n_cookie_index;
39562 +atomic_unchecked_t fscache_n_cookie_data;
39563 +atomic_unchecked_t fscache_n_cookie_special;
39564 +
39565 +atomic_unchecked_t fscache_n_object_alloc;
39566 +atomic_unchecked_t fscache_n_object_no_alloc;
39567 +atomic_unchecked_t fscache_n_object_lookups;
39568 +atomic_unchecked_t fscache_n_object_lookups_negative;
39569 +atomic_unchecked_t fscache_n_object_lookups_positive;
39570 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39571 +atomic_unchecked_t fscache_n_object_created;
39572 +atomic_unchecked_t fscache_n_object_avail;
39573 +atomic_unchecked_t fscache_n_object_dead;
39574 +
39575 +atomic_unchecked_t fscache_n_checkaux_none;
39576 +atomic_unchecked_t fscache_n_checkaux_okay;
39577 +atomic_unchecked_t fscache_n_checkaux_update;
39578 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39579
39580 atomic_t fscache_n_cop_alloc_object;
39581 atomic_t fscache_n_cop_lookup_object;
39582 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39583 seq_puts(m, "FS-Cache statistics\n");
39584
39585 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39586 - atomic_read(&fscache_n_cookie_index),
39587 - atomic_read(&fscache_n_cookie_data),
39588 - atomic_read(&fscache_n_cookie_special));
39589 + atomic_read_unchecked(&fscache_n_cookie_index),
39590 + atomic_read_unchecked(&fscache_n_cookie_data),
39591 + atomic_read_unchecked(&fscache_n_cookie_special));
39592
39593 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39594 - atomic_read(&fscache_n_object_alloc),
39595 - atomic_read(&fscache_n_object_no_alloc),
39596 - atomic_read(&fscache_n_object_avail),
39597 - atomic_read(&fscache_n_object_dead));
39598 + atomic_read_unchecked(&fscache_n_object_alloc),
39599 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39600 + atomic_read_unchecked(&fscache_n_object_avail),
39601 + atomic_read_unchecked(&fscache_n_object_dead));
39602 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39603 - atomic_read(&fscache_n_checkaux_none),
39604 - atomic_read(&fscache_n_checkaux_okay),
39605 - atomic_read(&fscache_n_checkaux_update),
39606 - atomic_read(&fscache_n_checkaux_obsolete));
39607 + atomic_read_unchecked(&fscache_n_checkaux_none),
39608 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39609 + atomic_read_unchecked(&fscache_n_checkaux_update),
39610 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39611
39612 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39613 - atomic_read(&fscache_n_marks),
39614 - atomic_read(&fscache_n_uncaches));
39615 + atomic_read_unchecked(&fscache_n_marks),
39616 + atomic_read_unchecked(&fscache_n_uncaches));
39617
39618 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39619 " oom=%u\n",
39620 - atomic_read(&fscache_n_acquires),
39621 - atomic_read(&fscache_n_acquires_null),
39622 - atomic_read(&fscache_n_acquires_no_cache),
39623 - atomic_read(&fscache_n_acquires_ok),
39624 - atomic_read(&fscache_n_acquires_nobufs),
39625 - atomic_read(&fscache_n_acquires_oom));
39626 + atomic_read_unchecked(&fscache_n_acquires),
39627 + atomic_read_unchecked(&fscache_n_acquires_null),
39628 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39629 + atomic_read_unchecked(&fscache_n_acquires_ok),
39630 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39631 + atomic_read_unchecked(&fscache_n_acquires_oom));
39632
39633 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39634 - atomic_read(&fscache_n_object_lookups),
39635 - atomic_read(&fscache_n_object_lookups_negative),
39636 - atomic_read(&fscache_n_object_lookups_positive),
39637 - atomic_read(&fscache_n_object_lookups_timed_out),
39638 - atomic_read(&fscache_n_object_created));
39639 + atomic_read_unchecked(&fscache_n_object_lookups),
39640 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39641 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39642 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39643 + atomic_read_unchecked(&fscache_n_object_created));
39644
39645 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39646 - atomic_read(&fscache_n_updates),
39647 - atomic_read(&fscache_n_updates_null),
39648 - atomic_read(&fscache_n_updates_run));
39649 + atomic_read_unchecked(&fscache_n_updates),
39650 + atomic_read_unchecked(&fscache_n_updates_null),
39651 + atomic_read_unchecked(&fscache_n_updates_run));
39652
39653 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39654 - atomic_read(&fscache_n_relinquishes),
39655 - atomic_read(&fscache_n_relinquishes_null),
39656 - atomic_read(&fscache_n_relinquishes_waitcrt),
39657 - atomic_read(&fscache_n_relinquishes_retire));
39658 + atomic_read_unchecked(&fscache_n_relinquishes),
39659 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39660 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39661 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39662
39663 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39664 - atomic_read(&fscache_n_attr_changed),
39665 - atomic_read(&fscache_n_attr_changed_ok),
39666 - atomic_read(&fscache_n_attr_changed_nobufs),
39667 - atomic_read(&fscache_n_attr_changed_nomem),
39668 - atomic_read(&fscache_n_attr_changed_calls));
39669 + atomic_read_unchecked(&fscache_n_attr_changed),
39670 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
39671 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39672 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39673 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
39674
39675 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39676 - atomic_read(&fscache_n_allocs),
39677 - atomic_read(&fscache_n_allocs_ok),
39678 - atomic_read(&fscache_n_allocs_wait),
39679 - atomic_read(&fscache_n_allocs_nobufs),
39680 - atomic_read(&fscache_n_allocs_intr));
39681 + atomic_read_unchecked(&fscache_n_allocs),
39682 + atomic_read_unchecked(&fscache_n_allocs_ok),
39683 + atomic_read_unchecked(&fscache_n_allocs_wait),
39684 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
39685 + atomic_read_unchecked(&fscache_n_allocs_intr));
39686 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39687 - atomic_read(&fscache_n_alloc_ops),
39688 - atomic_read(&fscache_n_alloc_op_waits),
39689 - atomic_read(&fscache_n_allocs_object_dead));
39690 + atomic_read_unchecked(&fscache_n_alloc_ops),
39691 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
39692 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
39693
39694 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39695 " int=%u oom=%u\n",
39696 - atomic_read(&fscache_n_retrievals),
39697 - atomic_read(&fscache_n_retrievals_ok),
39698 - atomic_read(&fscache_n_retrievals_wait),
39699 - atomic_read(&fscache_n_retrievals_nodata),
39700 - atomic_read(&fscache_n_retrievals_nobufs),
39701 - atomic_read(&fscache_n_retrievals_intr),
39702 - atomic_read(&fscache_n_retrievals_nomem));
39703 + atomic_read_unchecked(&fscache_n_retrievals),
39704 + atomic_read_unchecked(&fscache_n_retrievals_ok),
39705 + atomic_read_unchecked(&fscache_n_retrievals_wait),
39706 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
39707 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39708 + atomic_read_unchecked(&fscache_n_retrievals_intr),
39709 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
39710 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39711 - atomic_read(&fscache_n_retrieval_ops),
39712 - atomic_read(&fscache_n_retrieval_op_waits),
39713 - atomic_read(&fscache_n_retrievals_object_dead));
39714 + atomic_read_unchecked(&fscache_n_retrieval_ops),
39715 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39716 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39717
39718 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39719 - atomic_read(&fscache_n_stores),
39720 - atomic_read(&fscache_n_stores_ok),
39721 - atomic_read(&fscache_n_stores_again),
39722 - atomic_read(&fscache_n_stores_nobufs),
39723 - atomic_read(&fscache_n_stores_oom));
39724 + atomic_read_unchecked(&fscache_n_stores),
39725 + atomic_read_unchecked(&fscache_n_stores_ok),
39726 + atomic_read_unchecked(&fscache_n_stores_again),
39727 + atomic_read_unchecked(&fscache_n_stores_nobufs),
39728 + atomic_read_unchecked(&fscache_n_stores_oom));
39729 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39730 - atomic_read(&fscache_n_store_ops),
39731 - atomic_read(&fscache_n_store_calls),
39732 - atomic_read(&fscache_n_store_pages),
39733 - atomic_read(&fscache_n_store_radix_deletes),
39734 - atomic_read(&fscache_n_store_pages_over_limit));
39735 + atomic_read_unchecked(&fscache_n_store_ops),
39736 + atomic_read_unchecked(&fscache_n_store_calls),
39737 + atomic_read_unchecked(&fscache_n_store_pages),
39738 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
39739 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39740
39741 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39742 - atomic_read(&fscache_n_store_vmscan_not_storing),
39743 - atomic_read(&fscache_n_store_vmscan_gone),
39744 - atomic_read(&fscache_n_store_vmscan_busy),
39745 - atomic_read(&fscache_n_store_vmscan_cancelled));
39746 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39747 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39748 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39749 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39750
39751 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39752 - atomic_read(&fscache_n_op_pend),
39753 - atomic_read(&fscache_n_op_run),
39754 - atomic_read(&fscache_n_op_enqueue),
39755 - atomic_read(&fscache_n_op_cancelled),
39756 - atomic_read(&fscache_n_op_rejected));
39757 + atomic_read_unchecked(&fscache_n_op_pend),
39758 + atomic_read_unchecked(&fscache_n_op_run),
39759 + atomic_read_unchecked(&fscache_n_op_enqueue),
39760 + atomic_read_unchecked(&fscache_n_op_cancelled),
39761 + atomic_read_unchecked(&fscache_n_op_rejected));
39762 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39763 - atomic_read(&fscache_n_op_deferred_release),
39764 - atomic_read(&fscache_n_op_release),
39765 - atomic_read(&fscache_n_op_gc));
39766 + atomic_read_unchecked(&fscache_n_op_deferred_release),
39767 + atomic_read_unchecked(&fscache_n_op_release),
39768 + atomic_read_unchecked(&fscache_n_op_gc));
39769
39770 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39771 atomic_read(&fscache_n_cop_alloc_object),
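The fscache counters converted above are pure statistics, so the patch moves them to the *_unchecked atomic variants, which (under PAX_REFCOUNT) skip the overflow detection applied to ordinary atomic_t; wrap-around in a stats counter is harmless. A minimal userspace sketch of that idea, with assumed names (n_lookups, record_lookup) and C11 atomics standing in for the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint n_lookups;   /* statistics only: wrap-around is harmless */

    static void record_lookup(void)
    {
            atomic_fetch_add_explicit(&n_lookups, 1, memory_order_relaxed);
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    record_lookup();
            printf("Lookups: n=%u\n",
                   atomic_load_explicit(&n_lookups, memory_order_relaxed));
            return 0;
    }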
39772 diff -urNp linux-2.6.32.41/fs/fs_struct.c linux-2.6.32.41/fs/fs_struct.c
39773 --- linux-2.6.32.41/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39774 +++ linux-2.6.32.41/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39775 @@ -4,6 +4,7 @@
39776 #include <linux/path.h>
39777 #include <linux/slab.h>
39778 #include <linux/fs_struct.h>
39779 +#include <linux/grsecurity.h>
39780
39781 /*
39782 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39783 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39784 old_root = fs->root;
39785 fs->root = *path;
39786 path_get(path);
39787 + gr_set_chroot_entries(current, path);
39788 write_unlock(&fs->lock);
39789 if (old_root.dentry)
39790 path_put(&old_root);
39791 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39792 && fs->root.mnt == old_root->mnt) {
39793 path_get(new_root);
39794 fs->root = *new_root;
39795 + gr_set_chroot_entries(p, new_root);
39796 count++;
39797 }
39798 if (fs->pwd.dentry == old_root->dentry
39799 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39800 task_lock(tsk);
39801 write_lock(&fs->lock);
39802 tsk->fs = NULL;
39803 - kill = !--fs->users;
39804 + gr_clear_chroot_entries(tsk);
39805 + kill = !atomic_dec_return(&fs->users);
39806 write_unlock(&fs->lock);
39807 task_unlock(tsk);
39808 if (kill)
39809 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
39810 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
39811 /* We don't need to lock fs - think why ;-) */
39812 if (fs) {
39813 - fs->users = 1;
39814 + atomic_set(&fs->users, 1);
39815 fs->in_exec = 0;
39816 rwlock_init(&fs->lock);
39817 fs->umask = old->umask;
39818 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
39819
39820 task_lock(current);
39821 write_lock(&fs->lock);
39822 - kill = !--fs->users;
39823 + kill = !atomic_dec_return(&fs->users);
39824 current->fs = new_fs;
39825 + gr_set_chroot_entries(current, &new_fs->root);
39826 write_unlock(&fs->lock);
39827 task_unlock(current);
39828
39829 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
39830
39831 /* to be mentioned only in INIT_TASK */
39832 struct fs_struct init_fs = {
39833 - .users = 1,
39834 + .users = ATOMIC_INIT(1),
39835 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
39836 .umask = 0022,
39837 };
39838 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
39839 task_lock(current);
39840
39841 write_lock(&init_fs.lock);
39842 - init_fs.users++;
39843 + atomic_inc(&init_fs.users);
39844 write_unlock(&init_fs.lock);
39845
39846 write_lock(&fs->lock);
39847 current->fs = &init_fs;
39848 - kill = !--fs->users;
39849 + gr_set_chroot_entries(current, &current->fs->root);
39850 + kill = !atomic_dec_return(&fs->users);
39851 write_unlock(&fs->lock);
39852
39853 task_unlock(current);
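The fs_struct hunks above turn the plain users count into an atomic_t and drop references with atomic_dec_return(): the last dropper sees the count hit zero and tears the object down. A compact standalone sketch of that drop pattern, with assumed names (fs_demo, put_fs_demo) and C11 atomics in place of the kernel helpers:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct fs_demo {
            atomic_int users;
            /* ... root, pwd, umask would live here ... */
    };

    static struct fs_demo *get_fs_demo(struct fs_demo *fs)
    {
            atomic_fetch_add(&fs->users, 1);
            return fs;
    }

    static void put_fs_demo(struct fs_demo *fs)
    {
            /* mirrors: kill = !atomic_dec_return(&fs->users); */
            if (atomic_fetch_sub(&fs->users, 1) == 1)
                    free(fs);
    }

    int main(void)
    {
            struct fs_demo *fs = calloc(1, sizeof(*fs));

            if (!fs)
                    return 1;
            atomic_store(&fs->users, 1);
            get_fs_demo(fs);   /* second reference */
            put_fs_demo(fs);   /* drops to 1, object survives */
            put_fs_demo(fs);   /* drops to 0, freed here */
            return 0;
    }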
39854 diff -urNp linux-2.6.32.41/fs/fuse/cuse.c linux-2.6.32.41/fs/fuse/cuse.c
39855 --- linux-2.6.32.41/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
39856 +++ linux-2.6.32.41/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
39857 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
39858 return rc;
39859 }
39860
39861 -static struct file_operations cuse_channel_fops; /* initialized during init */
39862 -
39863 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
39864 + .owner = THIS_MODULE,
39865 + .llseek = no_llseek,
39866 + .read = do_sync_read,
39867 + .aio_read = fuse_dev_read,
39868 + .write = do_sync_write,
39869 + .aio_write = fuse_dev_write,
39870 + .poll = fuse_dev_poll,
39871 + .open = cuse_channel_open,
39872 + .release = cuse_channel_release,
39873 + .fasync = fuse_dev_fasync,
39874 +};
39875
39876 /**************************************************************************
39877 * Misc stuff and module initializatiion
39878 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
39879 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
39880 INIT_LIST_HEAD(&cuse_conntbl[i]);
39881
39882 - /* inherit and extend fuse_dev_operations */
39883 - cuse_channel_fops = fuse_dev_operations;
39884 - cuse_channel_fops.owner = THIS_MODULE;
39885 - cuse_channel_fops.open = cuse_channel_open;
39886 - cuse_channel_fops.release = cuse_channel_release;
39887 -
39888 cuse_class = class_create(THIS_MODULE, "cuse");
39889 if (IS_ERR(cuse_class))
39890 return PTR_ERR(cuse_class);
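Above, cuse_channel_fops becomes a fully spelled-out const table instead of a runtime copy of fuse_dev_operations patched in cuse_init(), which is why fuse_dev_read/write/poll/fasync get exported in the fs/fuse/dev.c hunks below. A small userspace analogue of that constification, with assumed names (struct ops, channel_ops):

    #include <stdio.h>

    struct ops {
            int (*open)(void);
            int (*release)(void);
    };

    static int my_open(void)    { puts("open");    return 0; }
    static int my_release(void) { puts("release"); return 0; }

    /* fully initialized at compile time; never written at runtime,
     * so it can live in read-only memory */
    static const struct ops channel_ops = {
            .open    = my_open,
            .release = my_release,
    };

    int main(void)
    {
            channel_ops.open();
            channel_ops.release();
            return 0;
    }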
39891 diff -urNp linux-2.6.32.41/fs/fuse/dev.c linux-2.6.32.41/fs/fuse/dev.c
39892 --- linux-2.6.32.41/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
39893 +++ linux-2.6.32.41/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
39894 @@ -745,7 +745,7 @@ __releases(&fc->lock)
39895 * request_end(). Otherwise add it to the processing list, and set
39896 * the 'sent' flag.
39897 */
39898 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39899 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39900 unsigned long nr_segs, loff_t pos)
39901 {
39902 int err;
39903 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
39904 spin_unlock(&fc->lock);
39905 return err;
39906 }
39907 +EXPORT_SYMBOL_GPL(fuse_dev_read);
39908
39909 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
39910 struct fuse_copy_state *cs)
39911 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
39912 {
39913 struct fuse_notify_inval_entry_out outarg;
39914 int err = -EINVAL;
39915 - char buf[FUSE_NAME_MAX+1];
39916 + char *buf = NULL;
39917 struct qstr name;
39918
39919 if (size < sizeof(outarg))
39920 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
39921 if (outarg.namelen > FUSE_NAME_MAX)
39922 goto err;
39923
39924 + err = -ENOMEM;
39925 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
39926 + if (!buf)
39927 + goto err;
39928 +
39929 name.name = buf;
39930 name.len = outarg.namelen;
39931 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
39932 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
39933
39934 down_read(&fc->killsb);
39935 err = -ENOENT;
39936 - if (!fc->sb)
39937 - goto err_unlock;
39938 -
39939 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39940 -
39941 -err_unlock:
39942 + if (fc->sb)
39943 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39944 up_read(&fc->killsb);
39945 + kfree(buf);
39946 return err;
39947
39948 err:
39949 fuse_copy_finish(cs);
39950 + kfree(buf);
39951 return err;
39952 }
39953
39954 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
39955 * it from the list and copy the rest of the buffer to the request.
39956 * The request is finished by calling request_end()
39957 */
39958 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39959 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39960 unsigned long nr_segs, loff_t pos)
39961 {
39962 int err;
39963 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
39964 fuse_copy_finish(&cs);
39965 return err;
39966 }
39967 +EXPORT_SYMBOL_GPL(fuse_dev_write);
39968
39969 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
39970 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
39971 {
39972 unsigned mask = POLLOUT | POLLWRNORM;
39973 struct fuse_conn *fc = fuse_get_conn(file);
39974 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
39975
39976 return mask;
39977 }
39978 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
39979
39980 /*
39981 * Abort all requests on the given list (pending or processing)
39982 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
39983 }
39984 EXPORT_SYMBOL_GPL(fuse_dev_release);
39985
39986 -static int fuse_dev_fasync(int fd, struct file *file, int on)
39987 +int fuse_dev_fasync(int fd, struct file *file, int on)
39988 {
39989 struct fuse_conn *fc = fuse_get_conn(file);
39990 if (!fc)
39991 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
39992 /* No locking - fasync_helper does its own locking */
39993 return fasync_helper(fd, file, on, &fc->fasync);
39994 }
39995 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
39996
39997 const struct file_operations fuse_dev_operations = {
39998 .owner = THIS_MODULE,
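fuse_notify_inval_entry() above trades a FUSE_NAME_MAX-sized on-stack array for a kmalloc'd buffer that is freed on both exit paths. A standalone sketch of the same shape, with assumed names (NAME_MAX_DEMO, handle_name):

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    #define NAME_MAX_DEMO 1024      /* stand-in for FUSE_NAME_MAX */

    static int handle_name(const char *src, size_t len)
    {
            char *buf;

            if (len > NAME_MAX_DEMO)
                    return -EINVAL;

            buf = malloc(NAME_MAX_DEMO + 1);   /* was a large on-stack array */
            if (!buf)
                    return -ENOMEM;

            memcpy(buf, src, len);
            buf[len] = '\0';
            /* ... look the name up ... */

            free(buf);                         /* freed on every exit path */
            return 0;
    }

    int main(void)
    {
            return handle_name("example", 7) ? 1 : 0;
    }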
39999 diff -urNp linux-2.6.32.41/fs/fuse/dir.c linux-2.6.32.41/fs/fuse/dir.c
40000 --- linux-2.6.32.41/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40001 +++ linux-2.6.32.41/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40002 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40003 return link;
40004 }
40005
40006 -static void free_link(char *link)
40007 +static void free_link(const char *link)
40008 {
40009 if (!IS_ERR(link))
40010 free_page((unsigned long) link);
40011 diff -urNp linux-2.6.32.41/fs/fuse/fuse_i.h linux-2.6.32.41/fs/fuse/fuse_i.h
40012 --- linux-2.6.32.41/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40013 +++ linux-2.6.32.41/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40014 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40015
40016 extern const struct dentry_operations fuse_dentry_operations;
40017
40018 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40019 + unsigned long nr_segs, loff_t pos);
40020 +
40021 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40022 + unsigned long nr_segs, loff_t pos);
40023 +
40024 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40025 +
40026 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40027 +
40028 /**
40029 * Inode to nodeid comparison.
40030 */
40031 diff -urNp linux-2.6.32.41/fs/gfs2/ops_inode.c linux-2.6.32.41/fs/gfs2/ops_inode.c
40032 --- linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40033 +++ linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40034 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40035 unsigned int x;
40036 int error;
40037
40038 + pax_track_stack();
40039 +
40040 if (ndentry->d_inode) {
40041 nip = GFS2_I(ndentry->d_inode);
40042 if (ip == nip)
40043 diff -urNp linux-2.6.32.41/fs/gfs2/sys.c linux-2.6.32.41/fs/gfs2/sys.c
40044 --- linux-2.6.32.41/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40045 +++ linux-2.6.32.41/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40046 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40047 return a->store ? a->store(sdp, buf, len) : len;
40048 }
40049
40050 -static struct sysfs_ops gfs2_attr_ops = {
40051 +static const struct sysfs_ops gfs2_attr_ops = {
40052 .show = gfs2_attr_show,
40053 .store = gfs2_attr_store,
40054 };
40055 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40056 return 0;
40057 }
40058
40059 -static struct kset_uevent_ops gfs2_uevent_ops = {
40060 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40061 .uevent = gfs2_uevent,
40062 };
40063
40064 diff -urNp linux-2.6.32.41/fs/hfsplus/catalog.c linux-2.6.32.41/fs/hfsplus/catalog.c
40065 --- linux-2.6.32.41/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40066 +++ linux-2.6.32.41/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40067 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40068 int err;
40069 u16 type;
40070
40071 + pax_track_stack();
40072 +
40073 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40074 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40075 if (err)
40076 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40077 int entry_size;
40078 int err;
40079
40080 + pax_track_stack();
40081 +
40082 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40083 sb = dir->i_sb;
40084 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40085 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40086 int entry_size, type;
40087 int err = 0;
40088
40089 + pax_track_stack();
40090 +
40091 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40092 dst_dir->i_ino, dst_name->name);
40093 sb = src_dir->i_sb;
40094 diff -urNp linux-2.6.32.41/fs/hfsplus/dir.c linux-2.6.32.41/fs/hfsplus/dir.c
40095 --- linux-2.6.32.41/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40096 +++ linux-2.6.32.41/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40097 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40098 struct hfsplus_readdir_data *rd;
40099 u16 type;
40100
40101 + pax_track_stack();
40102 +
40103 if (filp->f_pos >= inode->i_size)
40104 return 0;
40105
40106 diff -urNp linux-2.6.32.41/fs/hfsplus/inode.c linux-2.6.32.41/fs/hfsplus/inode.c
40107 --- linux-2.6.32.41/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40108 +++ linux-2.6.32.41/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40109 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40110 int res = 0;
40111 u16 type;
40112
40113 + pax_track_stack();
40114 +
40115 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40116
40117 HFSPLUS_I(inode).dev = 0;
40118 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40119 struct hfs_find_data fd;
40120 hfsplus_cat_entry entry;
40121
40122 + pax_track_stack();
40123 +
40124 if (HFSPLUS_IS_RSRC(inode))
40125 main_inode = HFSPLUS_I(inode).rsrc_inode;
40126
40127 diff -urNp linux-2.6.32.41/fs/hfsplus/ioctl.c linux-2.6.32.41/fs/hfsplus/ioctl.c
40128 --- linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40129 +++ linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40130 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40131 struct hfsplus_cat_file *file;
40132 int res;
40133
40134 + pax_track_stack();
40135 +
40136 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40137 return -EOPNOTSUPP;
40138
40139 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40140 struct hfsplus_cat_file *file;
40141 ssize_t res = 0;
40142
40143 + pax_track_stack();
40144 +
40145 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40146 return -EOPNOTSUPP;
40147
40148 diff -urNp linux-2.6.32.41/fs/hfsplus/super.c linux-2.6.32.41/fs/hfsplus/super.c
40149 --- linux-2.6.32.41/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40150 +++ linux-2.6.32.41/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40151 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40152 struct nls_table *nls = NULL;
40153 int err = -EINVAL;
40154
40155 + pax_track_stack();
40156 +
40157 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40158 if (!sbi)
40159 return -ENOMEM;
40160 diff -urNp linux-2.6.32.41/fs/hugetlbfs/inode.c linux-2.6.32.41/fs/hugetlbfs/inode.c
40161 --- linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40162 +++ linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40163 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40164 .kill_sb = kill_litter_super,
40165 };
40166
40167 -static struct vfsmount *hugetlbfs_vfsmount;
40168 +struct vfsmount *hugetlbfs_vfsmount;
40169
40170 static int can_do_hugetlb_shm(void)
40171 {
40172 diff -urNp linux-2.6.32.41/fs/ioctl.c linux-2.6.32.41/fs/ioctl.c
40173 --- linux-2.6.32.41/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40174 +++ linux-2.6.32.41/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40175 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40176 u64 phys, u64 len, u32 flags)
40177 {
40178 struct fiemap_extent extent;
40179 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40180 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40181
40182 /* only count the extents */
40183 if (fieinfo->fi_extents_max == 0) {
40184 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40185
40186 fieinfo.fi_flags = fiemap.fm_flags;
40187 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40188 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40189 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40190
40191 if (fiemap.fm_extent_count != 0 &&
40192 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40193 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40194 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40195 fiemap.fm_flags = fieinfo.fi_flags;
40196 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40197 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40198 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40199 error = -EFAULT;
40200
40201 return error;
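The __user markers added to the fiemap code above are sparse address-space annotations: in a normal build they expand to nothing, and under sparse they flag direct dereferences of userspace pointers, which must instead go through copy_to_user()/copy_from_user(). A minimal illustration of how such an annotation is typically defined and attached (the stub macro and demo structs are assumptions, not taken from the patch):

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    struct fiemap_extent_demo {
            unsigned long long fe_logical;
    };

    struct fiemap_info_demo {
            /* points into the calling process; only reached via copy_to_user() */
            struct fiemap_extent_demo __user *fi_extents_start;
    };

    int main(void)
    {
            struct fiemap_info_demo d = { 0 };
            (void)d;
            return 0;
    }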
40202 diff -urNp linux-2.6.32.41/fs/jbd/checkpoint.c linux-2.6.32.41/fs/jbd/checkpoint.c
40203 --- linux-2.6.32.41/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40204 +++ linux-2.6.32.41/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40205 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40206 tid_t this_tid;
40207 int result;
40208
40209 + pax_track_stack();
40210 +
40211 jbd_debug(1, "Start checkpoint\n");
40212
40213 /*
40214 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rtime.c linux-2.6.32.41/fs/jffs2/compr_rtime.c
40215 --- linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40216 +++ linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40217 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40218 int outpos = 0;
40219 int pos=0;
40220
40221 + pax_track_stack();
40222 +
40223 memset(positions,0,sizeof(positions));
40224
40225 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40226 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40227 int outpos = 0;
40228 int pos=0;
40229
40230 + pax_track_stack();
40231 +
40232 memset(positions,0,sizeof(positions));
40233
40234 while (outpos<destlen) {
40235 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rubin.c linux-2.6.32.41/fs/jffs2/compr_rubin.c
40236 --- linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40237 +++ linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40238 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40239 int ret;
40240 uint32_t mysrclen, mydstlen;
40241
40242 + pax_track_stack();
40243 +
40244 mysrclen = *sourcelen;
40245 mydstlen = *dstlen - 8;
40246
40247 diff -urNp linux-2.6.32.41/fs/jffs2/erase.c linux-2.6.32.41/fs/jffs2/erase.c
40248 --- linux-2.6.32.41/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40249 +++ linux-2.6.32.41/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40250 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40251 struct jffs2_unknown_node marker = {
40252 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40253 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40254 - .totlen = cpu_to_je32(c->cleanmarker_size)
40255 + .totlen = cpu_to_je32(c->cleanmarker_size),
40256 + .hdr_crc = cpu_to_je32(0)
40257 };
40258
40259 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40260 diff -urNp linux-2.6.32.41/fs/jffs2/wbuf.c linux-2.6.32.41/fs/jffs2/wbuf.c
40261 --- linux-2.6.32.41/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40262 +++ linux-2.6.32.41/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40263 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40264 {
40265 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40266 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40267 - .totlen = constant_cpu_to_je32(8)
40268 + .totlen = constant_cpu_to_je32(8),
40269 + .hdr_crc = constant_cpu_to_je32(0)
40270 };
40271
40272 /*
40273 diff -urNp linux-2.6.32.41/fs/jffs2/xattr.c linux-2.6.32.41/fs/jffs2/xattr.c
40274 --- linux-2.6.32.41/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40275 +++ linux-2.6.32.41/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40276 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40277
40278 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40279
40280 + pax_track_stack();
40281 +
40282 /* Phase.1 : Merge same xref */
40283 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40284 xref_tmphash[i] = NULL;
40285 diff -urNp linux-2.6.32.41/fs/Kconfig.binfmt linux-2.6.32.41/fs/Kconfig.binfmt
40286 --- linux-2.6.32.41/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40287 +++ linux-2.6.32.41/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40288 @@ -86,7 +86,7 @@ config HAVE_AOUT
40289
40290 config BINFMT_AOUT
40291 tristate "Kernel support for a.out and ECOFF binaries"
40292 - depends on HAVE_AOUT
40293 + depends on HAVE_AOUT && BROKEN
40294 ---help---
40295 A.out (Assembler.OUTput) is a set of formats for libraries and
40296 executables used in the earliest versions of UNIX. Linux used
40297 diff -urNp linux-2.6.32.41/fs/libfs.c linux-2.6.32.41/fs/libfs.c
40298 --- linux-2.6.32.41/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40299 +++ linux-2.6.32.41/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40300 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40301
40302 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40303 struct dentry *next;
40304 + char d_name[sizeof(next->d_iname)];
40305 + const unsigned char *name;
40306 +
40307 next = list_entry(p, struct dentry, d_u.d_child);
40308 if (d_unhashed(next) || !next->d_inode)
40309 continue;
40310
40311 spin_unlock(&dcache_lock);
40312 - if (filldir(dirent, next->d_name.name,
40313 + name = next->d_name.name;
40314 + if (name == next->d_iname) {
40315 + memcpy(d_name, name, next->d_name.len);
40316 + name = d_name;
40317 + }
40318 + if (filldir(dirent, name,
40319 next->d_name.len, filp->f_pos,
40320 next->d_inode->i_ino,
40321 dt_type(next->d_inode)) < 0)
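dcache_readdir() above snapshots short names stored inline in the dentry (d_iname) into a local buffer before dcache_lock is dropped, so filldir() never reads storage the dentry's owner may reuse once the lock is gone. A generic userspace sketch of that snapshot-then-unlock pattern, with assumed names (shared_name, emit_name) and a pthread mutex standing in for dcache_lock:

    #include <pthread.h>
    #include <string.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_name[32] = "example";

    static void emit_name(void)
    {
            char local[sizeof(shared_name)];

            pthread_mutex_lock(&lock);
            memcpy(local, shared_name, sizeof(local));  /* snapshot under the lock */
            pthread_mutex_unlock(&lock);

            printf("%s\n", local);      /* safe: uses only the private copy */
    }

    int main(void)
    {
            emit_name();
            return 0;
    }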
40322 diff -urNp linux-2.6.32.41/fs/lockd/clntproc.c linux-2.6.32.41/fs/lockd/clntproc.c
40323 --- linux-2.6.32.41/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40324 +++ linux-2.6.32.41/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40325 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40326 /*
40327 * Cookie counter for NLM requests
40328 */
40329 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40330 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40331
40332 void nlmclnt_next_cookie(struct nlm_cookie *c)
40333 {
40334 - u32 cookie = atomic_inc_return(&nlm_cookie);
40335 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40336
40337 memcpy(c->data, &cookie, 4);
40338 c->len=4;
40339 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40340 struct nlm_rqst reqst, *req;
40341 int status;
40342
40343 + pax_track_stack();
40344 +
40345 req = &reqst;
40346 memset(req, 0, sizeof(*req));
40347 locks_init_lock(&req->a_args.lock.fl);
40348 diff -urNp linux-2.6.32.41/fs/lockd/svc.c linux-2.6.32.41/fs/lockd/svc.c
40349 --- linux-2.6.32.41/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40350 +++ linux-2.6.32.41/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40351 @@ -43,7 +43,7 @@
40352
40353 static struct svc_program nlmsvc_program;
40354
40355 -struct nlmsvc_binding * nlmsvc_ops;
40356 +const struct nlmsvc_binding * nlmsvc_ops;
40357 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40358
40359 static DEFINE_MUTEX(nlmsvc_mutex);
40360 diff -urNp linux-2.6.32.41/fs/locks.c linux-2.6.32.41/fs/locks.c
40361 --- linux-2.6.32.41/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40362 +++ linux-2.6.32.41/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40363 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40364 return;
40365
40366 if (filp->f_op && filp->f_op->flock) {
40367 - struct file_lock fl = {
40368 + struct file_lock flock = {
40369 .fl_pid = current->tgid,
40370 .fl_file = filp,
40371 .fl_flags = FL_FLOCK,
40372 .fl_type = F_UNLCK,
40373 .fl_end = OFFSET_MAX,
40374 };
40375 - filp->f_op->flock(filp, F_SETLKW, &fl);
40376 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40377 - fl.fl_ops->fl_release_private(&fl);
40378 + filp->f_op->flock(filp, F_SETLKW, &flock);
40379 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40380 + flock.fl_ops->fl_release_private(&flock);
40381 }
40382
40383 lock_kernel();
40384 diff -urNp linux-2.6.32.41/fs/namei.c linux-2.6.32.41/fs/namei.c
40385 --- linux-2.6.32.41/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40386 +++ linux-2.6.32.41/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40387 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40388 return ret;
40389
40390 /*
40391 - * Read/write DACs are always overridable.
40392 - * Executable DACs are overridable if at least one exec bit is set.
40393 - */
40394 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40395 - if (capable(CAP_DAC_OVERRIDE))
40396 - return 0;
40397 -
40398 - /*
40399 * Searching includes executable on directories, else just read.
40400 */
40401 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40402 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40403 if (capable(CAP_DAC_READ_SEARCH))
40404 return 0;
40405
40406 + /*
40407 + * Read/write DACs are always overridable.
40408 + * Executable DACs are overridable if at least one exec bit is set.
40409 + */
40410 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40411 + if (capable(CAP_DAC_OVERRIDE))
40412 + return 0;
40413 +
40414 return -EACCES;
40415 }
40416
40417 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40418 if (!ret)
40419 goto ok;
40420
40421 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40422 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40423 + capable(CAP_DAC_OVERRIDE))
40424 goto ok;
40425
40426 return ret;
40427 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40428 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40429 error = PTR_ERR(cookie);
40430 if (!IS_ERR(cookie)) {
40431 - char *s = nd_get_link(nd);
40432 + const char *s = nd_get_link(nd);
40433 error = 0;
40434 if (s)
40435 error = __vfs_follow_link(nd, s);
40436 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40437 err = security_inode_follow_link(path->dentry, nd);
40438 if (err)
40439 goto loop;
40440 +
40441 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40442 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40443 + err = -EACCES;
40444 + goto loop;
40445 + }
40446 +
40447 current->link_count++;
40448 current->total_link_count++;
40449 nd->depth++;
40450 @@ -1016,11 +1024,18 @@ return_reval:
40451 break;
40452 }
40453 return_base:
40454 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40455 + path_put(&nd->path);
40456 + return -ENOENT;
40457 + }
40458 return 0;
40459 out_dput:
40460 path_put_conditional(&next, nd);
40461 break;
40462 }
40463 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40464 + err = -ENOENT;
40465 +
40466 path_put(&nd->path);
40467 return_err:
40468 return err;
40469 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40470 int retval = path_init(dfd, name, flags, nd);
40471 if (!retval)
40472 retval = path_walk(name, nd);
40473 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40474 - nd->path.dentry->d_inode))
40475 - audit_inode(name, nd->path.dentry);
40476 +
40477 + if (likely(!retval)) {
40478 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40479 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40480 + retval = -ENOENT;
40481 + if (!audit_dummy_context())
40482 + audit_inode(name, nd->path.dentry);
40483 + }
40484 + }
40485 if (nd->root.mnt) {
40486 path_put(&nd->root);
40487 nd->root.mnt = NULL;
40488 }
40489 +
40490 return retval;
40491 }
40492
40493 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40494 if (error)
40495 goto err_out;
40496
40497 +
40498 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40499 + error = -EPERM;
40500 + goto err_out;
40501 + }
40502 + if (gr_handle_rawio(inode)) {
40503 + error = -EPERM;
40504 + goto err_out;
40505 + }
40506 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40507 + error = -EACCES;
40508 + goto err_out;
40509 + }
40510 +
40511 if (flag & O_TRUNC) {
40512 error = get_write_access(inode);
40513 if (error)
40514 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40515 int error;
40516 struct dentry *dir = nd->path.dentry;
40517
40518 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40519 + error = -EACCES;
40520 + goto out_unlock;
40521 + }
40522 +
40523 if (!IS_POSIXACL(dir->d_inode))
40524 mode &= ~current_umask();
40525 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40526 if (error)
40527 goto out_unlock;
40528 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40529 + if (!error)
40530 + gr_handle_create(path->dentry, nd->path.mnt);
40531 out_unlock:
40532 mutex_unlock(&dir->d_inode->i_mutex);
40533 dput(nd->path.dentry);
40534 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40535 &nd, flag);
40536 if (error)
40537 return ERR_PTR(error);
40538 +
40539 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40540 + error = -EPERM;
40541 + goto exit;
40542 + }
40543 +
40544 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40545 + error = -EPERM;
40546 + goto exit;
40547 + }
40548 +
40549 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40550 + error = -EACCES;
40551 + goto exit;
40552 + }
40553 +
40554 goto ok;
40555 }
40556
40557 @@ -1795,6 +1854,14 @@ do_last:
40558 /*
40559 * It already exists.
40560 */
40561 +
40562 + /* only check if O_CREAT is specified, all other checks need
40563 + to go into may_open */
40564 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40565 + error = -EACCES;
40566 + goto exit_mutex_unlock;
40567 + }
40568 +
40569 mutex_unlock(&dir->d_inode->i_mutex);
40570 audit_inode(pathname, path.dentry);
40571
40572 @@ -1887,6 +1954,13 @@ do_link:
40573 error = security_inode_follow_link(path.dentry, &nd);
40574 if (error)
40575 goto exit_dput;
40576 +
40577 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40578 + path.dentry, nd.path.mnt)) {
40579 + error = -EACCES;
40580 + goto exit_dput;
40581 + }
40582 +
40583 error = __do_follow_link(&path, &nd);
40584 if (error) {
40585 /* Does someone understand code flow here? Or it is only
40586 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40587 error = may_mknod(mode);
40588 if (error)
40589 goto out_dput;
40590 +
40591 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40592 + error = -EPERM;
40593 + goto out_dput;
40594 + }
40595 +
40596 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40597 + error = -EACCES;
40598 + goto out_dput;
40599 + }
40600 +
40601 error = mnt_want_write(nd.path.mnt);
40602 if (error)
40603 goto out_dput;
40604 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40605 }
40606 out_drop_write:
40607 mnt_drop_write(nd.path.mnt);
40608 +
40609 + if (!error)
40610 + gr_handle_create(dentry, nd.path.mnt);
40611 out_dput:
40612 dput(dentry);
40613 out_unlock:
40614 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40615 if (IS_ERR(dentry))
40616 goto out_unlock;
40617
40618 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40619 + error = -EACCES;
40620 + goto out_dput;
40621 + }
40622 +
40623 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40624 mode &= ~current_umask();
40625 error = mnt_want_write(nd.path.mnt);
40626 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40627 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40628 out_drop_write:
40629 mnt_drop_write(nd.path.mnt);
40630 +
40631 + if (!error)
40632 + gr_handle_create(dentry, nd.path.mnt);
40633 +
40634 out_dput:
40635 dput(dentry);
40636 out_unlock:
40637 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40638 char * name;
40639 struct dentry *dentry;
40640 struct nameidata nd;
40641 + ino_t saved_ino = 0;
40642 + dev_t saved_dev = 0;
40643
40644 error = user_path_parent(dfd, pathname, &nd, &name);
40645 if (error)
40646 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40647 error = PTR_ERR(dentry);
40648 if (IS_ERR(dentry))
40649 goto exit2;
40650 +
40651 + if (dentry->d_inode != NULL) {
40652 + if (dentry->d_inode->i_nlink <= 1) {
40653 + saved_ino = dentry->d_inode->i_ino;
40654 + saved_dev = gr_get_dev_from_dentry(dentry);
40655 + }
40656 +
40657 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40658 + error = -EACCES;
40659 + goto exit3;
40660 + }
40661 + }
40662 +
40663 error = mnt_want_write(nd.path.mnt);
40664 if (error)
40665 goto exit3;
40666 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40667 if (error)
40668 goto exit4;
40669 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40670 + if (!error && (saved_dev || saved_ino))
40671 + gr_handle_delete(saved_ino, saved_dev);
40672 exit4:
40673 mnt_drop_write(nd.path.mnt);
40674 exit3:
40675 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40676 struct dentry *dentry;
40677 struct nameidata nd;
40678 struct inode *inode = NULL;
40679 + ino_t saved_ino = 0;
40680 + dev_t saved_dev = 0;
40681
40682 error = user_path_parent(dfd, pathname, &nd, &name);
40683 if (error)
40684 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40685 if (nd.last.name[nd.last.len])
40686 goto slashes;
40687 inode = dentry->d_inode;
40688 - if (inode)
40689 + if (inode) {
40690 + if (inode->i_nlink <= 1) {
40691 + saved_ino = inode->i_ino;
40692 + saved_dev = gr_get_dev_from_dentry(dentry);
40693 + }
40694 +
40695 atomic_inc(&inode->i_count);
40696 +
40697 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40698 + error = -EACCES;
40699 + goto exit2;
40700 + }
40701 + }
40702 error = mnt_want_write(nd.path.mnt);
40703 if (error)
40704 goto exit2;
40705 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40706 if (error)
40707 goto exit3;
40708 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40709 + if (!error && (saved_ino || saved_dev))
40710 + gr_handle_delete(saved_ino, saved_dev);
40711 exit3:
40712 mnt_drop_write(nd.path.mnt);
40713 exit2:
40714 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40715 if (IS_ERR(dentry))
40716 goto out_unlock;
40717
40718 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40719 + error = -EACCES;
40720 + goto out_dput;
40721 + }
40722 +
40723 error = mnt_want_write(nd.path.mnt);
40724 if (error)
40725 goto out_dput;
40726 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40727 if (error)
40728 goto out_drop_write;
40729 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40730 + if (!error)
40731 + gr_handle_create(dentry, nd.path.mnt);
40732 out_drop_write:
40733 mnt_drop_write(nd.path.mnt);
40734 out_dput:
40735 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40736 error = PTR_ERR(new_dentry);
40737 if (IS_ERR(new_dentry))
40738 goto out_unlock;
40739 +
40740 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40741 + old_path.dentry->d_inode,
40742 + old_path.dentry->d_inode->i_mode, to)) {
40743 + error = -EACCES;
40744 + goto out_dput;
40745 + }
40746 +
40747 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40748 + old_path.dentry, old_path.mnt, to)) {
40749 + error = -EACCES;
40750 + goto out_dput;
40751 + }
40752 +
40753 error = mnt_want_write(nd.path.mnt);
40754 if (error)
40755 goto out_dput;
40756 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40757 if (error)
40758 goto out_drop_write;
40759 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40760 + if (!error)
40761 + gr_handle_create(new_dentry, nd.path.mnt);
40762 out_drop_write:
40763 mnt_drop_write(nd.path.mnt);
40764 out_dput:
40765 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40766 char *to;
40767 int error;
40768
40769 + pax_track_stack();
40770 +
40771 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40772 if (error)
40773 goto exit;
40774 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40775 if (new_dentry == trap)
40776 goto exit5;
40777
40778 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40779 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
40780 + to);
40781 + if (error)
40782 + goto exit5;
40783 +
40784 error = mnt_want_write(oldnd.path.mnt);
40785 if (error)
40786 goto exit5;
40787 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40788 goto exit6;
40789 error = vfs_rename(old_dir->d_inode, old_dentry,
40790 new_dir->d_inode, new_dentry);
40791 + if (!error)
40792 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
40793 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
40794 exit6:
40795 mnt_drop_write(oldnd.path.mnt);
40796 exit5:
40797 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
40798
40799 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
40800 {
40801 + char tmpbuf[64];
40802 + const char *newlink;
40803 int len;
40804
40805 len = PTR_ERR(link);
40806 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
40807 len = strlen(link);
40808 if (len > (unsigned) buflen)
40809 len = buflen;
40810 - if (copy_to_user(buffer, link, len))
40811 +
40812 + if (len < sizeof(tmpbuf)) {
40813 + memcpy(tmpbuf, link, len);
40814 + newlink = tmpbuf;
40815 + } else
40816 + newlink = link;
40817 +
40818 + if (copy_to_user(buffer, newlink, len))
40819 len = -EFAULT;
40820 out:
40821 return len;
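The generic_permission() hunks near the top of the fs/namei.c diff above move the CAP_DAC_OVERRIDE test below the CAP_DAC_READ_SEARCH test, so the narrower capability is consulted first and the broad override becomes the last resort. A condensed, standalone model of the resulting ordering (capability results are passed in as plain ints rather than calling capable(); names are assumptions):

    #define MAY_EXEC  0x01
    #define MAY_WRITE 0x02
    #define MAY_READ  0x04

    static int permission_order_demo(int mask, int is_dir, int exec_ok,
                                     int cap_dac_read_search, int cap_dac_override)
    {
            mask &= MAY_READ | MAY_WRITE | MAY_EXEC;

            /* narrower capability first: plain read, or search on a directory */
            if (mask == MAY_READ || (is_dir && !(mask & MAY_WRITE)))
                    if (cap_dac_read_search)
                            return 0;

            /* broad override only as the final fallback */
            if (!(mask & MAY_EXEC) || exec_ok)
                    if (cap_dac_override)
                            return 0;

            return -13;     /* -EACCES */
    }

    int main(void)
    {
            /* read-only access with only CAP_DAC_READ_SEARCH succeeds
             * without ever consulting CAP_DAC_OVERRIDE */
            return permission_order_demo(MAY_READ, 0, 0, 1, 0) ? 1 : 0;
    }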
40822 diff -urNp linux-2.6.32.41/fs/namespace.c linux-2.6.32.41/fs/namespace.c
40823 --- linux-2.6.32.41/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
40824 +++ linux-2.6.32.41/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
40825 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
40826 if (!(sb->s_flags & MS_RDONLY))
40827 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
40828 up_write(&sb->s_umount);
40829 +
40830 + gr_log_remount(mnt->mnt_devname, retval);
40831 +
40832 return retval;
40833 }
40834
40835 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
40836 security_sb_umount_busy(mnt);
40837 up_write(&namespace_sem);
40838 release_mounts(&umount_list);
40839 +
40840 + gr_log_unmount(mnt->mnt_devname, retval);
40841 +
40842 return retval;
40843 }
40844
40845 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
40846 if (retval)
40847 goto dput_out;
40848
40849 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
40850 + retval = -EPERM;
40851 + goto dput_out;
40852 + }
40853 +
40854 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
40855 + retval = -EPERM;
40856 + goto dput_out;
40857 + }
40858 +
40859 if (flags & MS_REMOUNT)
40860 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
40861 data_page);
40862 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
40863 dev_name, data_page);
40864 dput_out:
40865 path_put(&path);
40866 +
40867 + gr_log_mount(dev_name, dir_name, retval);
40868 +
40869 return retval;
40870 }
40871
40872 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
40873 goto out1;
40874 }
40875
40876 + if (gr_handle_chroot_pivot()) {
40877 + error = -EPERM;
40878 + path_put(&old);
40879 + goto out1;
40880 + }
40881 +
40882 read_lock(&current->fs->lock);
40883 root = current->fs->root;
40884 path_get(&current->fs->root);
40885 diff -urNp linux-2.6.32.41/fs/ncpfs/dir.c linux-2.6.32.41/fs/ncpfs/dir.c
40886 --- linux-2.6.32.41/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40887 +++ linux-2.6.32.41/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
40888 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
40889 int res, val = 0, len;
40890 __u8 __name[NCP_MAXPATHLEN + 1];
40891
40892 + pax_track_stack();
40893 +
40894 parent = dget_parent(dentry);
40895 dir = parent->d_inode;
40896
40897 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
40898 int error, res, len;
40899 __u8 __name[NCP_MAXPATHLEN + 1];
40900
40901 + pax_track_stack();
40902 +
40903 lock_kernel();
40904 error = -EIO;
40905 if (!ncp_conn_valid(server))
40906 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
40907 int error, result, len;
40908 int opmode;
40909 __u8 __name[NCP_MAXPATHLEN + 1];
40910 -
40911 +
40912 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
40913 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
40914
40915 + pax_track_stack();
40916 +
40917 error = -EIO;
40918 lock_kernel();
40919 if (!ncp_conn_valid(server))
40920 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
40921 int error, len;
40922 __u8 __name[NCP_MAXPATHLEN + 1];
40923
40924 + pax_track_stack();
40925 +
40926 DPRINTK("ncp_mkdir: making %s/%s\n",
40927 dentry->d_parent->d_name.name, dentry->d_name.name);
40928
40929 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
40930 if (!ncp_conn_valid(server))
40931 goto out;
40932
40933 + pax_track_stack();
40934 +
40935 ncp_age_dentry(server, dentry);
40936 len = sizeof(__name);
40937 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
40938 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
40939 int old_len, new_len;
40940 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
40941
40942 + pax_track_stack();
40943 +
40944 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
40945 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
40946 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
40947 diff -urNp linux-2.6.32.41/fs/ncpfs/inode.c linux-2.6.32.41/fs/ncpfs/inode.c
40948 --- linux-2.6.32.41/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40949 +++ linux-2.6.32.41/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
40950 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
40951 #endif
40952 struct ncp_entry_info finfo;
40953
40954 + pax_track_stack();
40955 +
40956 data.wdog_pid = NULL;
40957 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
40958 if (!server)
40959 diff -urNp linux-2.6.32.41/fs/nfs/inode.c linux-2.6.32.41/fs/nfs/inode.c
40960 --- linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
40961 +++ linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
40962 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
40963 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
40964 }
40965
40966 -static atomic_long_t nfs_attr_generation_counter;
40967 +static atomic_long_unchecked_t nfs_attr_generation_counter;
40968
40969 static unsigned long nfs_read_attr_generation_counter(void)
40970 {
40971 - return atomic_long_read(&nfs_attr_generation_counter);
40972 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
40973 }
40974
40975 unsigned long nfs_inc_attr_generation_counter(void)
40976 {
40977 - return atomic_long_inc_return(&nfs_attr_generation_counter);
40978 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
40979 }
40980
40981 void nfs_fattr_init(struct nfs_fattr *fattr)
40982 diff -urNp linux-2.6.32.41/fs/nfsd/lockd.c linux-2.6.32.41/fs/nfsd/lockd.c
40983 --- linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
40984 +++ linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
40985 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
40986 fput(filp);
40987 }
40988
40989 -static struct nlmsvc_binding nfsd_nlm_ops = {
40990 +static const struct nlmsvc_binding nfsd_nlm_ops = {
40991 .fopen = nlm_fopen, /* open file for locking */
40992 .fclose = nlm_fclose, /* close file */
40993 };
40994 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4state.c linux-2.6.32.41/fs/nfsd/nfs4state.c
40995 --- linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
40996 +++ linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
40997 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
40998 unsigned int cmd;
40999 int err;
41000
41001 + pax_track_stack();
41002 +
41003 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41004 (long long) lock->lk_offset,
41005 (long long) lock->lk_length);
41006 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4xdr.c linux-2.6.32.41/fs/nfsd/nfs4xdr.c
41007 --- linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41008 +++ linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41009 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41010 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41011 u32 minorversion = resp->cstate.minorversion;
41012
41013 + pax_track_stack();
41014 +
41015 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41016 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41017 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41018 diff -urNp linux-2.6.32.41/fs/nfsd/vfs.c linux-2.6.32.41/fs/nfsd/vfs.c
41019 --- linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41020 +++ linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41021 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41022 } else {
41023 oldfs = get_fs();
41024 set_fs(KERNEL_DS);
41025 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41026 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41027 set_fs(oldfs);
41028 }
41029
41030 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41031
41032 /* Write the data. */
41033 oldfs = get_fs(); set_fs(KERNEL_DS);
41034 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41035 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41036 set_fs(oldfs);
41037 if (host_err < 0)
41038 goto out_nfserr;
41039 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41040 */
41041
41042 oldfs = get_fs(); set_fs(KERNEL_DS);
41043 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41044 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41045 set_fs(oldfs);
41046
41047 if (host_err < 0)
41048 diff -urNp linux-2.6.32.41/fs/nilfs2/ioctl.c linux-2.6.32.41/fs/nilfs2/ioctl.c
41049 --- linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41050 +++ linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41051 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41052 unsigned int cmd, void __user *argp)
41053 {
41054 struct nilfs_argv argv[5];
41055 - const static size_t argsz[5] = {
41056 + static const size_t argsz[5] = {
41057 sizeof(struct nilfs_vdesc),
41058 sizeof(struct nilfs_period),
41059 sizeof(__u64),
41060 diff -urNp linux-2.6.32.41/fs/notify/dnotify/dnotify.c linux-2.6.32.41/fs/notify/dnotify/dnotify.c
41061 --- linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41062 +++ linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41063 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41064 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41065 }
41066
41067 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41068 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41069 .handle_event = dnotify_handle_event,
41070 .should_send_event = dnotify_should_send_event,
41071 .free_group_priv = NULL,
41072 diff -urNp linux-2.6.32.41/fs/notify/notification.c linux-2.6.32.41/fs/notify/notification.c
41073 --- linux-2.6.32.41/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41074 +++ linux-2.6.32.41/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41075 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41076 * get set to 0 so it will never get 'freed'
41077 */
41078 static struct fsnotify_event q_overflow_event;
41079 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41080 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41081
41082 /**
41083 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41084 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41085 */
41086 u32 fsnotify_get_cookie(void)
41087 {
41088 - return atomic_inc_return(&fsnotify_sync_cookie);
41089 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41090 }
41091 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41092
41093 diff -urNp linux-2.6.32.41/fs/ntfs/dir.c linux-2.6.32.41/fs/ntfs/dir.c
41094 --- linux-2.6.32.41/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41095 +++ linux-2.6.32.41/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41096 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41097 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41098 ~(s64)(ndir->itype.index.block_size - 1)));
41099 /* Bounds checks. */
41100 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41101 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41102 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41103 "inode 0x%lx or driver bug.", vdir->i_ino);
41104 goto err_out;
41105 diff -urNp linux-2.6.32.41/fs/ntfs/file.c linux-2.6.32.41/fs/ntfs/file.c
41106 --- linux-2.6.32.41/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41107 +++ linux-2.6.32.41/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41108 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41109 #endif /* NTFS_RW */
41110 };
41111
41112 -const struct file_operations ntfs_empty_file_ops = {};
41113 +const struct file_operations ntfs_empty_file_ops __read_only;
41114
41115 -const struct inode_operations ntfs_empty_inode_ops = {};
41116 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41117 diff -urNp linux-2.6.32.41/fs/ocfs2/cluster/masklog.c linux-2.6.32.41/fs/ocfs2/cluster/masklog.c
41118 --- linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41119 +++ linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41120 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41121 return mlog_mask_store(mlog_attr->mask, buf, count);
41122 }
41123
41124 -static struct sysfs_ops mlog_attr_ops = {
41125 +static const struct sysfs_ops mlog_attr_ops = {
41126 .show = mlog_show,
41127 .store = mlog_store,
41128 };
41129 diff -urNp linux-2.6.32.41/fs/ocfs2/localalloc.c linux-2.6.32.41/fs/ocfs2/localalloc.c
41130 --- linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41131 +++ linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41132 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41133 goto bail;
41134 }
41135
41136 - atomic_inc(&osb->alloc_stats.moves);
41137 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41138
41139 status = 0;
41140 bail:
41141 diff -urNp linux-2.6.32.41/fs/ocfs2/namei.c linux-2.6.32.41/fs/ocfs2/namei.c
41142 --- linux-2.6.32.41/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41143 +++ linux-2.6.32.41/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41144 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41145 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41146 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41147
41148 + pax_track_stack();
41149 +
41150 /* At some point it might be nice to break this function up a
41151 * bit. */
41152
41153 diff -urNp linux-2.6.32.41/fs/ocfs2/ocfs2.h linux-2.6.32.41/fs/ocfs2/ocfs2.h
41154 --- linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41155 +++ linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41156 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41157
41158 struct ocfs2_alloc_stats
41159 {
41160 - atomic_t moves;
41161 - atomic_t local_data;
41162 - atomic_t bitmap_data;
41163 - atomic_t bg_allocs;
41164 - atomic_t bg_extends;
41165 + atomic_unchecked_t moves;
41166 + atomic_unchecked_t local_data;
41167 + atomic_unchecked_t bitmap_data;
41168 + atomic_unchecked_t bg_allocs;
41169 + atomic_unchecked_t bg_extends;
41170 };
41171
41172 enum ocfs2_local_alloc_state
41173 diff -urNp linux-2.6.32.41/fs/ocfs2/suballoc.c linux-2.6.32.41/fs/ocfs2/suballoc.c
41174 --- linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41175 +++ linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41176 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41177 mlog_errno(status);
41178 goto bail;
41179 }
41180 - atomic_inc(&osb->alloc_stats.bg_extends);
41181 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41182
41183 /* You should never ask for this much metadata */
41184 BUG_ON(bits_wanted >
41185 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41186 mlog_errno(status);
41187 goto bail;
41188 }
41189 - atomic_inc(&osb->alloc_stats.bg_allocs);
41190 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41191
41192 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41193 ac->ac_bits_given += (*num_bits);
41194 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41195 mlog_errno(status);
41196 goto bail;
41197 }
41198 - atomic_inc(&osb->alloc_stats.bg_allocs);
41199 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41200
41201 BUG_ON(num_bits != 1);
41202
41203 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41204 cluster_start,
41205 num_clusters);
41206 if (!status)
41207 - atomic_inc(&osb->alloc_stats.local_data);
41208 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41209 } else {
41210 if (min_clusters > (osb->bitmap_cpg - 1)) {
41211 /* The only paths asking for contiguousness
41212 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41213 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41214 bg_blkno,
41215 bg_bit_off);
41216 - atomic_inc(&osb->alloc_stats.bitmap_data);
41217 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41218 }
41219 }
41220 if (status < 0) {
41221 diff -urNp linux-2.6.32.41/fs/ocfs2/super.c linux-2.6.32.41/fs/ocfs2/super.c
41222 --- linux-2.6.32.41/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41223 +++ linux-2.6.32.41/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41224 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41225 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41226 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41227 "Stats",
41228 - atomic_read(&osb->alloc_stats.bitmap_data),
41229 - atomic_read(&osb->alloc_stats.local_data),
41230 - atomic_read(&osb->alloc_stats.bg_allocs),
41231 - atomic_read(&osb->alloc_stats.moves),
41232 - atomic_read(&osb->alloc_stats.bg_extends));
41233 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41234 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41235 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41236 + atomic_read_unchecked(&osb->alloc_stats.moves),
41237 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41238
41239 out += snprintf(buf + out, len - out,
41240 "%10s => State: %u Descriptor: %llu Size: %u bits "
41241 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41242 spin_lock_init(&osb->osb_xattr_lock);
41243 ocfs2_init_inode_steal_slot(osb);
41244
41245 - atomic_set(&osb->alloc_stats.moves, 0);
41246 - atomic_set(&osb->alloc_stats.local_data, 0);
41247 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41248 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41249 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41250 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41251 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41252 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41253 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41254 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41255
41256 /* Copy the blockcheck stats from the superblock probe */
41257 osb->osb_ecc_stats = *stats;
41258 diff -urNp linux-2.6.32.41/fs/open.c linux-2.6.32.41/fs/open.c
41259 --- linux-2.6.32.41/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41260 +++ linux-2.6.32.41/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41261 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41262 error = locks_verify_truncate(inode, NULL, length);
41263 if (!error)
41264 error = security_path_truncate(&path, length, 0);
41265 +
41266 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41267 + error = -EACCES;
41268 +
41269 if (!error) {
41270 vfs_dq_init(inode);
41271 error = do_truncate(path.dentry, length, 0, NULL);
41272 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41273 if (__mnt_is_readonly(path.mnt))
41274 res = -EROFS;
41275
41276 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41277 + res = -EACCES;
41278 +
41279 out_path_release:
41280 path_put(&path);
41281 out:
41282 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41283 if (error)
41284 goto dput_and_out;
41285
41286 + gr_log_chdir(path.dentry, path.mnt);
41287 +
41288 set_fs_pwd(current->fs, &path);
41289
41290 dput_and_out:
41291 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41292 goto out_putf;
41293
41294 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41295 +
41296 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41297 + error = -EPERM;
41298 +
41299 + if (!error)
41300 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41301 +
41302 if (!error)
41303 set_fs_pwd(current->fs, &file->f_path);
41304 out_putf:
41305 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41306 if (!capable(CAP_SYS_CHROOT))
41307 goto dput_and_out;
41308
41309 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41310 + goto dput_and_out;
41311 +
41312 + if (gr_handle_chroot_caps(&path)) {
41313 + error = -ENOMEM;
41314 + goto dput_and_out;
41315 + }
41316 +
41317 set_fs_root(current->fs, &path);
41318 +
41319 + gr_handle_chroot_chdir(&path);
41320 +
41321 error = 0;
41322 dput_and_out:
41323 path_put(&path);
41324 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41325 err = mnt_want_write_file(file);
41326 if (err)
41327 goto out_putf;
41328 +
41329 mutex_lock(&inode->i_mutex);
41330 +
41331 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41332 + err = -EACCES;
41333 + goto out_unlock;
41334 + }
41335 +
41336 if (mode == (mode_t) -1)
41337 mode = inode->i_mode;
41338 +
41339 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41340 + err = -EPERM;
41341 + goto out_unlock;
41342 + }
41343 +
41344 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41345 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41346 err = notify_change(dentry, &newattrs);
41347 +
41348 +out_unlock:
41349 mutex_unlock(&inode->i_mutex);
41350 mnt_drop_write(file->f_path.mnt);
41351 out_putf:
41352 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41353 error = mnt_want_write(path.mnt);
41354 if (error)
41355 goto dput_and_out;
41356 +
41357 mutex_lock(&inode->i_mutex);
41358 +
41359 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41360 + error = -EACCES;
41361 + goto out_unlock;
41362 + }
41363 +
41364 if (mode == (mode_t) -1)
41365 mode = inode->i_mode;
41366 +
41367 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41368 + error = -EACCES;
41369 + goto out_unlock;
41370 + }
41371 +
41372 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41373 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41374 error = notify_change(path.dentry, &newattrs);
41375 +
41376 +out_unlock:
41377 mutex_unlock(&inode->i_mutex);
41378 mnt_drop_write(path.mnt);
41379 dput_and_out:
41380 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41381 return sys_fchmodat(AT_FDCWD, filename, mode);
41382 }
41383
41384 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41385 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41386 {
41387 struct inode *inode = dentry->d_inode;
41388 int error;
41389 struct iattr newattrs;
41390
41391 + if (!gr_acl_handle_chown(dentry, mnt))
41392 + return -EACCES;
41393 +
41394 newattrs.ia_valid = ATTR_CTIME;
41395 if (user != (uid_t) -1) {
41396 newattrs.ia_valid |= ATTR_UID;
41397 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41398 error = mnt_want_write(path.mnt);
41399 if (error)
41400 goto out_release;
41401 - error = chown_common(path.dentry, user, group);
41402 + error = chown_common(path.dentry, user, group, path.mnt);
41403 mnt_drop_write(path.mnt);
41404 out_release:
41405 path_put(&path);
41406 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41407 error = mnt_want_write(path.mnt);
41408 if (error)
41409 goto out_release;
41410 - error = chown_common(path.dentry, user, group);
41411 + error = chown_common(path.dentry, user, group, path.mnt);
41412 mnt_drop_write(path.mnt);
41413 out_release:
41414 path_put(&path);
41415 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41416 error = mnt_want_write(path.mnt);
41417 if (error)
41418 goto out_release;
41419 - error = chown_common(path.dentry, user, group);
41420 + error = chown_common(path.dentry, user, group, path.mnt);
41421 mnt_drop_write(path.mnt);
41422 out_release:
41423 path_put(&path);
41424 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41425 goto out_fput;
41426 dentry = file->f_path.dentry;
41427 audit_inode(NULL, dentry);
41428 - error = chown_common(dentry, user, group);
41429 + error = chown_common(dentry, user, group, file->f_path.mnt);
41430 mnt_drop_write(file->f_path.mnt);
41431 out_fput:
41432 fput(file);
41433 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41434 if (!IS_ERR(tmp)) {
41435 fd = get_unused_fd_flags(flags);
41436 if (fd >= 0) {
41437 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41438 + struct file *f;
41439 + /* don't allow to be set by userland */
41440 + flags &= ~FMODE_GREXEC;
41441 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41442 if (IS_ERR(f)) {
41443 put_unused_fd(fd);
41444 fd = PTR_ERR(f);
41445 diff -urNp linux-2.6.32.41/fs/partitions/ldm.c linux-2.6.32.41/fs/partitions/ldm.c
41446 --- linux-2.6.32.41/fs/partitions/ldm.c 2011-05-10 22:12:01.000000000 -0400
41447 +++ linux-2.6.32.41/fs/partitions/ldm.c 2011-04-18 19:31:12.000000000 -0400
41448 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41449 ldm_error ("A VBLK claims to have %d parts.", num);
41450 return false;
41451 }
41452 +
41453 if (rec >= num) {
41454 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41455 return false;
41456 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41457 goto found;
41458 }
41459
41460 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41461 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41462 if (!f) {
41463 ldm_crit ("Out of memory.");
41464 return false;
41465 diff -urNp linux-2.6.32.41/fs/partitions/mac.c linux-2.6.32.41/fs/partitions/mac.c
41466 --- linux-2.6.32.41/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41467 +++ linux-2.6.32.41/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41468 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41469 return 0; /* not a MacOS disk */
41470 }
41471 blocks_in_map = be32_to_cpu(part->map_count);
41472 + printk(" [mac]");
41473 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41474 put_dev_sector(sect);
41475 return 0;
41476 }
41477 - printk(" [mac]");
41478 for (slot = 1; slot <= blocks_in_map; ++slot) {
41479 int pos = slot * secsize;
41480 put_dev_sector(sect);
41481 diff -urNp linux-2.6.32.41/fs/pipe.c linux-2.6.32.41/fs/pipe.c
41482 --- linux-2.6.32.41/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41483 +++ linux-2.6.32.41/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41484 @@ -401,9 +401,9 @@ redo:
41485 }
41486 if (bufs) /* More to do? */
41487 continue;
41488 - if (!pipe->writers)
41489 + if (!atomic_read(&pipe->writers))
41490 break;
41491 - if (!pipe->waiting_writers) {
41492 + if (!atomic_read(&pipe->waiting_writers)) {
41493 /* syscall merging: Usually we must not sleep
41494 * if O_NONBLOCK is set, or if we got some data.
41495 * But if a writer sleeps in kernel space, then
41496 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41497 mutex_lock(&inode->i_mutex);
41498 pipe = inode->i_pipe;
41499
41500 - if (!pipe->readers) {
41501 + if (!atomic_read(&pipe->readers)) {
41502 send_sig(SIGPIPE, current, 0);
41503 ret = -EPIPE;
41504 goto out;
41505 @@ -511,7 +511,7 @@ redo1:
41506 for (;;) {
41507 int bufs;
41508
41509 - if (!pipe->readers) {
41510 + if (!atomic_read(&pipe->readers)) {
41511 send_sig(SIGPIPE, current, 0);
41512 if (!ret)
41513 ret = -EPIPE;
41514 @@ -597,9 +597,9 @@ redo2:
41515 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41516 do_wakeup = 0;
41517 }
41518 - pipe->waiting_writers++;
41519 + atomic_inc(&pipe->waiting_writers);
41520 pipe_wait(pipe);
41521 - pipe->waiting_writers--;
41522 + atomic_dec(&pipe->waiting_writers);
41523 }
41524 out:
41525 mutex_unlock(&inode->i_mutex);
41526 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41527 mask = 0;
41528 if (filp->f_mode & FMODE_READ) {
41529 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41530 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41531 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41532 mask |= POLLHUP;
41533 }
41534
41535 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41536 * Most Unices do not set POLLERR for FIFOs but on Linux they
41537 * behave exactly like pipes for poll().
41538 */
41539 - if (!pipe->readers)
41540 + if (!atomic_read(&pipe->readers))
41541 mask |= POLLERR;
41542 }
41543
41544 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41545
41546 mutex_lock(&inode->i_mutex);
41547 pipe = inode->i_pipe;
41548 - pipe->readers -= decr;
41549 - pipe->writers -= decw;
41550 + atomic_sub(decr, &pipe->readers);
41551 + atomic_sub(decw, &pipe->writers);
41552
41553 - if (!pipe->readers && !pipe->writers) {
41554 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41555 free_pipe_info(inode);
41556 } else {
41557 wake_up_interruptible_sync(&pipe->wait);
41558 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41559
41560 if (inode->i_pipe) {
41561 ret = 0;
41562 - inode->i_pipe->readers++;
41563 + atomic_inc(&inode->i_pipe->readers);
41564 }
41565
41566 mutex_unlock(&inode->i_mutex);
41567 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41568
41569 if (inode->i_pipe) {
41570 ret = 0;
41571 - inode->i_pipe->writers++;
41572 + atomic_inc(&inode->i_pipe->writers);
41573 }
41574
41575 mutex_unlock(&inode->i_mutex);
41576 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41577 if (inode->i_pipe) {
41578 ret = 0;
41579 if (filp->f_mode & FMODE_READ)
41580 - inode->i_pipe->readers++;
41581 + atomic_inc(&inode->i_pipe->readers);
41582 if (filp->f_mode & FMODE_WRITE)
41583 - inode->i_pipe->writers++;
41584 + atomic_inc(&inode->i_pipe->writers);
41585 }
41586
41587 mutex_unlock(&inode->i_mutex);
41588 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41589 inode->i_pipe = NULL;
41590 }
41591
41592 -static struct vfsmount *pipe_mnt __read_mostly;
41593 +struct vfsmount *pipe_mnt __read_mostly;
41594 static int pipefs_delete_dentry(struct dentry *dentry)
41595 {
41596 /*
41597 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41598 goto fail_iput;
41599 inode->i_pipe = pipe;
41600
41601 - pipe->readers = pipe->writers = 1;
41602 + atomic_set(&pipe->readers, 1);
41603 + atomic_set(&pipe->writers, 1);
41604 inode->i_fop = &rdwr_pipefifo_fops;
41605
41606 /*
41607 diff -urNp linux-2.6.32.41/fs/proc/array.c linux-2.6.32.41/fs/proc/array.c
41608 --- linux-2.6.32.41/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41609 +++ linux-2.6.32.41/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41610 @@ -60,6 +60,7 @@
41611 #include <linux/tty.h>
41612 #include <linux/string.h>
41613 #include <linux/mman.h>
41614 +#include <linux/grsecurity.h>
41615 #include <linux/proc_fs.h>
41616 #include <linux/ioport.h>
41617 #include <linux/uaccess.h>
41618 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41619 p->nivcsw);
41620 }
41621
41622 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41623 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41624 +{
41625 + if (p->mm)
41626 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41627 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41628 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41629 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41630 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41631 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41632 + else
41633 + seq_printf(m, "PaX:\t-----\n");
41634 +}
41635 +#endif
41636 +
41637 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41638 struct pid *pid, struct task_struct *task)
41639 {
41640 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41641 task_cap(m, task);
41642 cpuset_task_status_allowed(m, task);
41643 task_context_switch_counts(m, task);
41644 +
41645 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41646 + task_pax(m, task);
41647 +#endif
41648 +
41649 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41650 + task_grsec_rbac(m, task);
41651 +#endif
41652 +
41653 return 0;
41654 }
41655
41656 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41657 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41658 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41659 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41660 +#endif
41661 +
41662 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41663 struct pid *pid, struct task_struct *task, int whole)
41664 {
41665 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41666 cputime_t cutime, cstime, utime, stime;
41667 cputime_t cgtime, gtime;
41668 unsigned long rsslim = 0;
41669 - char tcomm[sizeof(task->comm)];
41670 + char tcomm[sizeof(task->comm)] = { 0 };
41671 unsigned long flags;
41672
41673 + pax_track_stack();
41674 +
41675 state = *get_task_state(task);
41676 vsize = eip = esp = 0;
41677 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41678 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41679 gtime = task_gtime(task);
41680 }
41681
41682 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41683 + if (PAX_RAND_FLAGS(mm)) {
41684 + eip = 0;
41685 + esp = 0;
41686 + wchan = 0;
41687 + }
41688 +#endif
41689 +#ifdef CONFIG_GRKERNSEC_HIDESYM
41690 + wchan = 0;
41691 + eip =0;
41692 + esp =0;
41693 +#endif
41694 +
41695 /* scale priority and nice values from timeslices to -20..20 */
41696 /* to make it look like a "normal" Unix priority/nice value */
41697 priority = task_prio(task);
41698 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41699 vsize,
41700 mm ? get_mm_rss(mm) : 0,
41701 rsslim,
41702 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41703 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41704 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41705 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41706 +#else
41707 mm ? (permitted ? mm->start_code : 1) : 0,
41708 mm ? (permitted ? mm->end_code : 1) : 0,
41709 (permitted && mm) ? mm->start_stack : 0,
41710 +#endif
41711 esp,
41712 eip,
41713 /* The signal information here is obsolete.
41714 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41715
41716 return 0;
41717 }
41718 +
41719 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41720 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41721 +{
41722 + u32 curr_ip = 0;
41723 + unsigned long flags;
41724 +
41725 + if (lock_task_sighand(task, &flags)) {
41726 + curr_ip = task->signal->curr_ip;
41727 + unlock_task_sighand(task, &flags);
41728 + }
41729 +
41730 + return sprintf(buffer, "%pI4\n", &curr_ip);
41731 +}
41732 +#endif
41733 diff -urNp linux-2.6.32.41/fs/proc/base.c linux-2.6.32.41/fs/proc/base.c
41734 --- linux-2.6.32.41/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41735 +++ linux-2.6.32.41/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
41736 @@ -102,6 +102,22 @@ struct pid_entry {
41737 union proc_op op;
41738 };
41739
41740 +struct getdents_callback {
41741 + struct linux_dirent __user * current_dir;
41742 + struct linux_dirent __user * previous;
41743 + struct file * file;
41744 + int count;
41745 + int error;
41746 +};
41747 +
41748 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41749 + loff_t offset, u64 ino, unsigned int d_type)
41750 +{
41751 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
41752 + buf->error = -EINVAL;
41753 + return 0;
41754 +}
41755 +
41756 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41757 .name = (NAME), \
41758 .len = sizeof(NAME) - 1, \
41759 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41760 if (task == current)
41761 return 0;
41762
41763 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41764 + return -EPERM;
41765 +
41766 /*
41767 * If current is actively ptrace'ing, and would also be
41768 * permitted to freshly attach with ptrace now, permit it.
41769 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41770 if (!mm->arg_end)
41771 goto out_mm; /* Shh! No looking before we're done */
41772
41773 + if (gr_acl_handle_procpidmem(task))
41774 + goto out_mm;
41775 +
41776 len = mm->arg_end - mm->arg_start;
41777
41778 if (len > PAGE_SIZE)
41779 @@ -287,12 +309,28 @@ out:
41780 return res;
41781 }
41782
41783 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41784 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41785 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41786 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41787 +#endif
41788 +
41789 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41790 {
41791 int res = 0;
41792 struct mm_struct *mm = get_task_mm(task);
41793 if (mm) {
41794 unsigned int nwords = 0;
41795 +
41796 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41797 + /* allow if we're currently ptracing this task */
41798 + if (PAX_RAND_FLAGS(mm) &&
41799 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
41800 + mmput(mm);
41801 + return res;
41802 + }
41803 +#endif
41804 +
41805 do {
41806 nwords += 2;
41807 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
41808 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
41809 }
41810
41811
41812 -#ifdef CONFIG_KALLSYMS
41813 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41814 /*
41815 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
41816 * Returns the resolved symbol. If that fails, simply return the address.
41817 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
41818 }
41819 #endif /* CONFIG_KALLSYMS */
41820
41821 -#ifdef CONFIG_STACKTRACE
41822 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41823
41824 #define MAX_STACK_TRACE_DEPTH 64
41825
41826 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
41827 return count;
41828 }
41829
41830 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
41831 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41832 static int proc_pid_syscall(struct task_struct *task, char *buffer)
41833 {
41834 long nr;
41835 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
41836 /************************************************************************/
41837
41838 /* permission checks */
41839 -static int proc_fd_access_allowed(struct inode *inode)
41840 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
41841 {
41842 struct task_struct *task;
41843 int allowed = 0;
41844 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
41845 */
41846 task = get_proc_task(inode);
41847 if (task) {
41848 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41849 + if (log)
41850 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
41851 + else
41852 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41853 put_task_struct(task);
41854 }
41855 return allowed;
41856 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
41857 if (!task)
41858 goto out_no_task;
41859
41860 + if (gr_acl_handle_procpidmem(task))
41861 + goto out;
41862 +
41863 if (!ptrace_may_access(task, PTRACE_MODE_READ))
41864 goto out;
41865
41866 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
41867 path_put(&nd->path);
41868
41869 /* Are we allowed to snoop on the tasks file descriptors? */
41870 - if (!proc_fd_access_allowed(inode))
41871 + if (!proc_fd_access_allowed(inode,0))
41872 goto out;
41873
41874 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
41875 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
41876 struct path path;
41877
41878 /* Are we allowed to snoop on the tasks file descriptors? */
41879 - if (!proc_fd_access_allowed(inode))
41880 - goto out;
41881 + /* logging this is needed for learning on chromium to work properly,
41882 + but we don't want to flood the logs from 'ps' which does a readlink
41883 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
41884 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
41885 + */
41886 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
41887 + if (!proc_fd_access_allowed(inode,0))
41888 + goto out;
41889 + } else {
41890 + if (!proc_fd_access_allowed(inode,1))
41891 + goto out;
41892 + }
41893
41894 error = PROC_I(inode)->op.proc_get_link(inode, &path);
41895 if (error)
41896 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
41897 rcu_read_lock();
41898 cred = __task_cred(task);
41899 inode->i_uid = cred->euid;
41900 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41901 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
41902 +#else
41903 inode->i_gid = cred->egid;
41904 +#endif
41905 rcu_read_unlock();
41906 }
41907 security_task_to_inode(task, inode);
41908 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
41909 struct inode *inode = dentry->d_inode;
41910 struct task_struct *task;
41911 const struct cred *cred;
41912 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41913 + const struct cred *tmpcred = current_cred();
41914 +#endif
41915
41916 generic_fillattr(inode, stat);
41917
41918 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
41919 stat->uid = 0;
41920 stat->gid = 0;
41921 task = pid_task(proc_pid(inode), PIDTYPE_PID);
41922 +
41923 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
41924 + rcu_read_unlock();
41925 + return -ENOENT;
41926 + }
41927 +
41928 if (task) {
41929 + cred = __task_cred(task);
41930 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41931 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
41932 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41933 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
41934 +#endif
41935 + ) {
41936 +#endif
41937 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
41938 +#ifdef CONFIG_GRKERNSEC_PROC_USER
41939 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
41940 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41941 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
41942 +#endif
41943 task_dumpable(task)) {
41944 - cred = __task_cred(task);
41945 stat->uid = cred->euid;
41946 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41947 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
41948 +#else
41949 stat->gid = cred->egid;
41950 +#endif
41951 }
41952 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41953 + } else {
41954 + rcu_read_unlock();
41955 + return -ENOENT;
41956 + }
41957 +#endif
41958 }
41959 rcu_read_unlock();
41960 return 0;
41961 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
41962
41963 if (task) {
41964 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
41965 +#ifdef CONFIG_GRKERNSEC_PROC_USER
41966 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
41967 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41968 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
41969 +#endif
41970 task_dumpable(task)) {
41971 rcu_read_lock();
41972 cred = __task_cred(task);
41973 inode->i_uid = cred->euid;
41974 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41975 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
41976 +#else
41977 inode->i_gid = cred->egid;
41978 +#endif
41979 rcu_read_unlock();
41980 } else {
41981 inode->i_uid = 0;
41982 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
41983 int fd = proc_fd(inode);
41984
41985 if (task) {
41986 - files = get_files_struct(task);
41987 + if (!gr_acl_handle_procpidmem(task))
41988 + files = get_files_struct(task);
41989 put_task_struct(task);
41990 }
41991 if (files) {
41992 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
41993 static int proc_fd_permission(struct inode *inode, int mask)
41994 {
41995 int rv;
41996 + struct task_struct *task;
41997
41998 rv = generic_permission(inode, mask, NULL);
41999 - if (rv == 0)
42000 - return 0;
42001 +
42002 if (task_pid(current) == proc_pid(inode))
42003 rv = 0;
42004 +
42005 + task = get_proc_task(inode);
42006 + if (task == NULL)
42007 + return rv;
42008 +
42009 + if (gr_acl_handle_procpidmem(task))
42010 + rv = -EACCES;
42011 +
42012 + put_task_struct(task);
42013 +
42014 return rv;
42015 }
42016
42017 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42018 if (!task)
42019 goto out_no_task;
42020
42021 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42022 + goto out;
42023 +
42024 /*
42025 * Yes, it does not scale. And it should not. Don't add
42026 * new entries into /proc/<tgid>/ without very good reasons.
42027 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42028 if (!task)
42029 goto out_no_task;
42030
42031 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42032 + goto out;
42033 +
42034 ret = 0;
42035 i = filp->f_pos;
42036 switch (i) {
42037 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42038 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42039 void *cookie)
42040 {
42041 - char *s = nd_get_link(nd);
42042 + const char *s = nd_get_link(nd);
42043 if (!IS_ERR(s))
42044 __putname(s);
42045 }
42046 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42047 #ifdef CONFIG_SCHED_DEBUG
42048 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42049 #endif
42050 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42051 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42052 INF("syscall", S_IRUSR, proc_pid_syscall),
42053 #endif
42054 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42055 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42056 #ifdef CONFIG_SECURITY
42057 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42058 #endif
42059 -#ifdef CONFIG_KALLSYMS
42060 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42061 INF("wchan", S_IRUGO, proc_pid_wchan),
42062 #endif
42063 -#ifdef CONFIG_STACKTRACE
42064 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42065 ONE("stack", S_IRUSR, proc_pid_stack),
42066 #endif
42067 #ifdef CONFIG_SCHEDSTATS
42068 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42069 #ifdef CONFIG_TASK_IO_ACCOUNTING
42070 INF("io", S_IRUGO, proc_tgid_io_accounting),
42071 #endif
42072 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42073 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42074 +#endif
42075 };
42076
42077 static int proc_tgid_base_readdir(struct file * filp,
42078 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42079 if (!inode)
42080 goto out;
42081
42082 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42083 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42084 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42085 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42086 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42087 +#else
42088 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42089 +#endif
42090 inode->i_op = &proc_tgid_base_inode_operations;
42091 inode->i_fop = &proc_tgid_base_operations;
42092 inode->i_flags|=S_IMMUTABLE;
42093 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42094 if (!task)
42095 goto out;
42096
42097 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42098 + goto out_put_task;
42099 +
42100 result = proc_pid_instantiate(dir, dentry, task, NULL);
42101 +out_put_task:
42102 put_task_struct(task);
42103 out:
42104 return result;
42105 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42106 {
42107 unsigned int nr;
42108 struct task_struct *reaper;
42109 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42110 + const struct cred *tmpcred = current_cred();
42111 + const struct cred *itercred;
42112 +#endif
42113 + filldir_t __filldir = filldir;
42114 struct tgid_iter iter;
42115 struct pid_namespace *ns;
42116
42117 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42118 for (iter = next_tgid(ns, iter);
42119 iter.task;
42120 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42121 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42122 + rcu_read_lock();
42123 + itercred = __task_cred(iter.task);
42124 +#endif
42125 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42126 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42127 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42128 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42129 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42130 +#endif
42131 + )
42132 +#endif
42133 + )
42134 + __filldir = &gr_fake_filldir;
42135 + else
42136 + __filldir = filldir;
42137 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42138 + rcu_read_unlock();
42139 +#endif
42140 filp->f_pos = iter.tgid + TGID_OFFSET;
42141 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42142 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42143 put_task_struct(iter.task);
42144 goto out;
42145 }
42146 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42147 #ifdef CONFIG_SCHED_DEBUG
42148 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42149 #endif
42150 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42151 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42152 INF("syscall", S_IRUSR, proc_pid_syscall),
42153 #endif
42154 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42155 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42156 #ifdef CONFIG_SECURITY
42157 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42158 #endif
42159 -#ifdef CONFIG_KALLSYMS
42160 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42161 INF("wchan", S_IRUGO, proc_pid_wchan),
42162 #endif
42163 -#ifdef CONFIG_STACKTRACE
42164 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42165 ONE("stack", S_IRUSR, proc_pid_stack),
42166 #endif
42167 #ifdef CONFIG_SCHEDSTATS
42168 diff -urNp linux-2.6.32.41/fs/proc/cmdline.c linux-2.6.32.41/fs/proc/cmdline.c
42169 --- linux-2.6.32.41/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42170 +++ linux-2.6.32.41/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42171 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42172
42173 static int __init proc_cmdline_init(void)
42174 {
42175 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42176 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42177 +#else
42178 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42179 +#endif
42180 return 0;
42181 }
42182 module_init(proc_cmdline_init);
42183 diff -urNp linux-2.6.32.41/fs/proc/devices.c linux-2.6.32.41/fs/proc/devices.c
42184 --- linux-2.6.32.41/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42185 +++ linux-2.6.32.41/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42186 @@ -64,7 +64,11 @@ static const struct file_operations proc
42187
42188 static int __init proc_devices_init(void)
42189 {
42190 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42191 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42192 +#else
42193 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42194 +#endif
42195 return 0;
42196 }
42197 module_init(proc_devices_init);
42198 diff -urNp linux-2.6.32.41/fs/proc/inode.c linux-2.6.32.41/fs/proc/inode.c
42199 --- linux-2.6.32.41/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42200 +++ linux-2.6.32.41/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42201 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42202 if (de->mode) {
42203 inode->i_mode = de->mode;
42204 inode->i_uid = de->uid;
42205 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42206 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42207 +#else
42208 inode->i_gid = de->gid;
42209 +#endif
42210 }
42211 if (de->size)
42212 inode->i_size = de->size;
42213 diff -urNp linux-2.6.32.41/fs/proc/internal.h linux-2.6.32.41/fs/proc/internal.h
42214 --- linux-2.6.32.41/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42215 +++ linux-2.6.32.41/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42216 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42217 struct pid *pid, struct task_struct *task);
42218 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42219 struct pid *pid, struct task_struct *task);
42220 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42221 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42222 +#endif
42223 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42224
42225 extern const struct file_operations proc_maps_operations;
42226 diff -urNp linux-2.6.32.41/fs/proc/Kconfig linux-2.6.32.41/fs/proc/Kconfig
42227 --- linux-2.6.32.41/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42228 +++ linux-2.6.32.41/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42229 @@ -30,12 +30,12 @@ config PROC_FS
42230
42231 config PROC_KCORE
42232 bool "/proc/kcore support" if !ARM
42233 - depends on PROC_FS && MMU
42234 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42235
42236 config PROC_VMCORE
42237 bool "/proc/vmcore support (EXPERIMENTAL)"
42238 - depends on PROC_FS && CRASH_DUMP
42239 - default y
42240 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42241 + default n
42242 help
42243 Exports the dump image of crashed kernel in ELF format.
42244
42245 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42246 limited in memory.
42247
42248 config PROC_PAGE_MONITOR
42249 - default y
42250 - depends on PROC_FS && MMU
42251 + default n
42252 + depends on PROC_FS && MMU && !GRKERNSEC
42253 bool "Enable /proc page monitoring" if EMBEDDED
42254 help
42255 Various /proc files exist to monitor process memory utilization:
42256 diff -urNp linux-2.6.32.41/fs/proc/kcore.c linux-2.6.32.41/fs/proc/kcore.c
42257 --- linux-2.6.32.41/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42258 +++ linux-2.6.32.41/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42259 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42260 off_t offset = 0;
42261 struct kcore_list *m;
42262
42263 + pax_track_stack();
42264 +
42265 /* setup ELF header */
42266 elf = (struct elfhdr *) bufp;
42267 bufp += sizeof(struct elfhdr);
42268 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42269 * the addresses in the elf_phdr on our list.
42270 */
42271 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42272 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42273 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42274 + if (tsz > buflen)
42275 tsz = buflen;
42276 -
42277 +
42278 while (buflen) {
42279 struct kcore_list *m;
42280
42281 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42282 kfree(elf_buf);
42283 } else {
42284 if (kern_addr_valid(start)) {
42285 - unsigned long n;
42286 + char *elf_buf;
42287 + mm_segment_t oldfs;
42288
42289 - n = copy_to_user(buffer, (char *)start, tsz);
42290 - /*
42291 - * We cannot distingush between fault on source
42292 - * and fault on destination. When this happens
42293 - * we clear too and hope it will trigger the
42294 - * EFAULT again.
42295 - */
42296 - if (n) {
42297 - if (clear_user(buffer + tsz - n,
42298 - n))
42299 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42300 + if (!elf_buf)
42301 + return -ENOMEM;
42302 + oldfs = get_fs();
42303 + set_fs(KERNEL_DS);
42304 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42305 + set_fs(oldfs);
42306 + if (copy_to_user(buffer, elf_buf, tsz)) {
42307 + kfree(elf_buf);
42308 return -EFAULT;
42309 + }
42310 }
42311 + set_fs(oldfs);
42312 + kfree(elf_buf);
42313 } else {
42314 if (clear_user(buffer, tsz))
42315 return -EFAULT;
42316 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42317
42318 static int open_kcore(struct inode *inode, struct file *filp)
42319 {
42320 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42321 + return -EPERM;
42322 +#endif
42323 if (!capable(CAP_SYS_RAWIO))
42324 return -EPERM;
42325 if (kcore_need_update)
42326 diff -urNp linux-2.6.32.41/fs/proc/meminfo.c linux-2.6.32.41/fs/proc/meminfo.c
42327 --- linux-2.6.32.41/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42328 +++ linux-2.6.32.41/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42329 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42330 unsigned long pages[NR_LRU_LISTS];
42331 int lru;
42332
42333 + pax_track_stack();
42334 +
42335 /*
42336 * display in kilobytes.
42337 */
42338 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42339 vmi.used >> 10,
42340 vmi.largest_chunk >> 10
42341 #ifdef CONFIG_MEMORY_FAILURE
42342 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42343 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42344 #endif
42345 );
42346
42347 diff -urNp linux-2.6.32.41/fs/proc/nommu.c linux-2.6.32.41/fs/proc/nommu.c
42348 --- linux-2.6.32.41/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42349 +++ linux-2.6.32.41/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42350 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42351 if (len < 1)
42352 len = 1;
42353 seq_printf(m, "%*c", len, ' ');
42354 - seq_path(m, &file->f_path, "");
42355 + seq_path(m, &file->f_path, "\n\\");
42356 }
42357
42358 seq_putc(m, '\n');
42359 diff -urNp linux-2.6.32.41/fs/proc/proc_net.c linux-2.6.32.41/fs/proc/proc_net.c
42360 --- linux-2.6.32.41/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42361 +++ linux-2.6.32.41/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42362 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42363 struct task_struct *task;
42364 struct nsproxy *ns;
42365 struct net *net = NULL;
42366 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42367 + const struct cred *cred = current_cred();
42368 +#endif
42369 +
42370 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42371 + if (cred->fsuid)
42372 + return net;
42373 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42374 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42375 + return net;
42376 +#endif
42377
42378 rcu_read_lock();
42379 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42380 diff -urNp linux-2.6.32.41/fs/proc/proc_sysctl.c linux-2.6.32.41/fs/proc/proc_sysctl.c
42381 --- linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42382 +++ linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42383 @@ -7,6 +7,8 @@
42384 #include <linux/security.h>
42385 #include "internal.h"
42386
42387 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42388 +
42389 static const struct dentry_operations proc_sys_dentry_operations;
42390 static const struct file_operations proc_sys_file_operations;
42391 static const struct inode_operations proc_sys_inode_operations;
42392 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42393 if (!p)
42394 goto out;
42395
42396 + if (gr_handle_sysctl(p, MAY_EXEC))
42397 + goto out;
42398 +
42399 err = ERR_PTR(-ENOMEM);
42400 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42401 if (h)
42402 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42403 if (*pos < file->f_pos)
42404 continue;
42405
42406 + if (gr_handle_sysctl(table, 0))
42407 + continue;
42408 +
42409 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42410 if (res)
42411 return res;
42412 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42413 if (IS_ERR(head))
42414 return PTR_ERR(head);
42415
42416 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42417 + return -ENOENT;
42418 +
42419 generic_fillattr(inode, stat);
42420 if (table)
42421 stat->mode = (stat->mode & S_IFMT) | table->mode;
42422 diff -urNp linux-2.6.32.41/fs/proc/root.c linux-2.6.32.41/fs/proc/root.c
42423 --- linux-2.6.32.41/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42424 +++ linux-2.6.32.41/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42425 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42426 #ifdef CONFIG_PROC_DEVICETREE
42427 proc_device_tree_init();
42428 #endif
42429 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42430 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42431 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42432 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42433 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42434 +#endif
42435 +#else
42436 proc_mkdir("bus", NULL);
42437 +#endif
42438 proc_sys_init();
42439 }
42440
42441 diff -urNp linux-2.6.32.41/fs/proc/task_mmu.c linux-2.6.32.41/fs/proc/task_mmu.c
42442 --- linux-2.6.32.41/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42443 +++ linux-2.6.32.41/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42444 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42445 "VmStk:\t%8lu kB\n"
42446 "VmExe:\t%8lu kB\n"
42447 "VmLib:\t%8lu kB\n"
42448 - "VmPTE:\t%8lu kB\n",
42449 - hiwater_vm << (PAGE_SHIFT-10),
42450 + "VmPTE:\t%8lu kB\n"
42451 +
42452 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42453 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42454 +#endif
42455 +
42456 + ,hiwater_vm << (PAGE_SHIFT-10),
42457 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42458 mm->locked_vm << (PAGE_SHIFT-10),
42459 hiwater_rss << (PAGE_SHIFT-10),
42460 total_rss << (PAGE_SHIFT-10),
42461 data << (PAGE_SHIFT-10),
42462 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42463 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42464 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42465 +
42466 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42467 + , mm->context.user_cs_base, mm->context.user_cs_limit
42468 +#endif
42469 +
42470 + );
42471 }
42472
42473 unsigned long task_vsize(struct mm_struct *mm)
42474 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42475 struct proc_maps_private *priv = m->private;
42476 struct vm_area_struct *vma = v;
42477
42478 - vma_stop(priv, vma);
42479 + if (!IS_ERR(vma))
42480 + vma_stop(priv, vma);
42481 if (priv->task)
42482 put_task_struct(priv->task);
42483 }
42484 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42485 return ret;
42486 }
42487
42488 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42489 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42490 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42491 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42492 +#endif
42493 +
42494 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42495 {
42496 struct mm_struct *mm = vma->vm_mm;
42497 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42498 int flags = vma->vm_flags;
42499 unsigned long ino = 0;
42500 unsigned long long pgoff = 0;
42501 - unsigned long start;
42502 dev_t dev = 0;
42503 int len;
42504
42505 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42506 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42507 }
42508
42509 - /* We don't show the stack guard page in /proc/maps */
42510 - start = vma->vm_start;
42511 - if (vma->vm_flags & VM_GROWSDOWN)
42512 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42513 - start += PAGE_SIZE;
42514 -
42515 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42516 - start,
42517 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42518 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42519 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42520 +#else
42521 + vma->vm_start,
42522 vma->vm_end,
42523 +#endif
42524 flags & VM_READ ? 'r' : '-',
42525 flags & VM_WRITE ? 'w' : '-',
42526 flags & VM_EXEC ? 'x' : '-',
42527 flags & VM_MAYSHARE ? 's' : 'p',
42528 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42529 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42530 +#else
42531 pgoff,
42532 +#endif
42533 MAJOR(dev), MINOR(dev), ino, &len);
42534
42535 /*
42536 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42537 */
42538 if (file) {
42539 pad_len_spaces(m, len);
42540 - seq_path(m, &file->f_path, "\n");
42541 + seq_path(m, &file->f_path, "\n\\");
42542 } else {
42543 const char *name = arch_vma_name(vma);
42544 if (!name) {
42545 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42546 if (vma->vm_start <= mm->brk &&
42547 vma->vm_end >= mm->start_brk) {
42548 name = "[heap]";
42549 - } else if (vma->vm_start <= mm->start_stack &&
42550 - vma->vm_end >= mm->start_stack) {
42551 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42552 + (vma->vm_start <= mm->start_stack &&
42553 + vma->vm_end >= mm->start_stack)) {
42554 name = "[stack]";
42555 }
42556 } else {
42557 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42558 };
42559
42560 memset(&mss, 0, sizeof mss);
42561 - mss.vma = vma;
42562 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42563 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42564 +
42565 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42566 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42567 +#endif
42568 + mss.vma = vma;
42569 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42570 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42571 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42572 + }
42573 +#endif
42574
42575 show_map_vma(m, vma);
42576
42577 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42578 "Swap: %8lu kB\n"
42579 "KernelPageSize: %8lu kB\n"
42580 "MMUPageSize: %8lu kB\n",
42581 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42582 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42583 +#else
42584 (vma->vm_end - vma->vm_start) >> 10,
42585 +#endif
42586 mss.resident >> 10,
42587 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42588 mss.shared_clean >> 10,
42589 diff -urNp linux-2.6.32.41/fs/proc/task_nommu.c linux-2.6.32.41/fs/proc/task_nommu.c
42590 --- linux-2.6.32.41/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42591 +++ linux-2.6.32.41/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42592 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42593 else
42594 bytes += kobjsize(mm);
42595
42596 - if (current->fs && current->fs->users > 1)
42597 + if (current->fs && atomic_read(&current->fs->users) > 1)
42598 sbytes += kobjsize(current->fs);
42599 else
42600 bytes += kobjsize(current->fs);
42601 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42602 if (len < 1)
42603 len = 1;
42604 seq_printf(m, "%*c", len, ' ');
42605 - seq_path(m, &file->f_path, "");
42606 + seq_path(m, &file->f_path, "\n\\");
42607 }
42608
42609 seq_putc(m, '\n');
42610 diff -urNp linux-2.6.32.41/fs/readdir.c linux-2.6.32.41/fs/readdir.c
42611 --- linux-2.6.32.41/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42612 +++ linux-2.6.32.41/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42613 @@ -16,6 +16,7 @@
42614 #include <linux/security.h>
42615 #include <linux/syscalls.h>
42616 #include <linux/unistd.h>
42617 +#include <linux/namei.h>
42618
42619 #include <asm/uaccess.h>
42620
42621 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42622
42623 struct readdir_callback {
42624 struct old_linux_dirent __user * dirent;
42625 + struct file * file;
42626 int result;
42627 };
42628
42629 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42630 buf->result = -EOVERFLOW;
42631 return -EOVERFLOW;
42632 }
42633 +
42634 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42635 + return 0;
42636 +
42637 buf->result++;
42638 dirent = buf->dirent;
42639 if (!access_ok(VERIFY_WRITE, dirent,
42640 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42641
42642 buf.result = 0;
42643 buf.dirent = dirent;
42644 + buf.file = file;
42645
42646 error = vfs_readdir(file, fillonedir, &buf);
42647 if (buf.result)
42648 @@ -142,6 +149,7 @@ struct linux_dirent {
42649 struct getdents_callback {
42650 struct linux_dirent __user * current_dir;
42651 struct linux_dirent __user * previous;
42652 + struct file * file;
42653 int count;
42654 int error;
42655 };
42656 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42657 buf->error = -EOVERFLOW;
42658 return -EOVERFLOW;
42659 }
42660 +
42661 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42662 + return 0;
42663 +
42664 dirent = buf->previous;
42665 if (dirent) {
42666 if (__put_user(offset, &dirent->d_off))
42667 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42668 buf.previous = NULL;
42669 buf.count = count;
42670 buf.error = 0;
42671 + buf.file = file;
42672
42673 error = vfs_readdir(file, filldir, &buf);
42674 if (error >= 0)
42675 @@ -228,6 +241,7 @@ out:
42676 struct getdents_callback64 {
42677 struct linux_dirent64 __user * current_dir;
42678 struct linux_dirent64 __user * previous;
42679 + struct file *file;
42680 int count;
42681 int error;
42682 };
42683 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42684 buf->error = -EINVAL; /* only used if we fail.. */
42685 if (reclen > buf->count)
42686 return -EINVAL;
42687 +
42688 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42689 + return 0;
42690 +
42691 dirent = buf->previous;
42692 if (dirent) {
42693 if (__put_user(offset, &dirent->d_off))
42694 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42695
42696 buf.current_dir = dirent;
42697 buf.previous = NULL;
42698 + buf.file = file;
42699 buf.count = count;
42700 buf.error = 0;
42701
42702 diff -urNp linux-2.6.32.41/fs/reiserfs/dir.c linux-2.6.32.41/fs/reiserfs/dir.c
42703 --- linux-2.6.32.41/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42704 +++ linux-2.6.32.41/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42705 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42706 struct reiserfs_dir_entry de;
42707 int ret = 0;
42708
42709 + pax_track_stack();
42710 +
42711 reiserfs_write_lock(inode->i_sb);
42712
42713 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42714 diff -urNp linux-2.6.32.41/fs/reiserfs/do_balan.c linux-2.6.32.41/fs/reiserfs/do_balan.c
42715 --- linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42716 +++ linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42717 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42718 return;
42719 }
42720
42721 - atomic_inc(&(fs_generation(tb->tb_sb)));
42722 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42723 do_balance_starts(tb);
42724
42725 /* balance leaf returns 0 except if combining L R and S into
42726 diff -urNp linux-2.6.32.41/fs/reiserfs/item_ops.c linux-2.6.32.41/fs/reiserfs/item_ops.c
42727 --- linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42728 +++ linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42729 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42730 vi->vi_index, vi->vi_type, vi->vi_ih);
42731 }
42732
42733 -static struct item_operations stat_data_ops = {
42734 +static const struct item_operations stat_data_ops = {
42735 .bytes_number = sd_bytes_number,
42736 .decrement_key = sd_decrement_key,
42737 .is_left_mergeable = sd_is_left_mergeable,
42738 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42739 vi->vi_index, vi->vi_type, vi->vi_ih);
42740 }
42741
42742 -static struct item_operations direct_ops = {
42743 +static const struct item_operations direct_ops = {
42744 .bytes_number = direct_bytes_number,
42745 .decrement_key = direct_decrement_key,
42746 .is_left_mergeable = direct_is_left_mergeable,
42747 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42748 vi->vi_index, vi->vi_type, vi->vi_ih);
42749 }
42750
42751 -static struct item_operations indirect_ops = {
42752 +static const struct item_operations indirect_ops = {
42753 .bytes_number = indirect_bytes_number,
42754 .decrement_key = indirect_decrement_key,
42755 .is_left_mergeable = indirect_is_left_mergeable,
42756 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42757 printk("\n");
42758 }
42759
42760 -static struct item_operations direntry_ops = {
42761 +static const struct item_operations direntry_ops = {
42762 .bytes_number = direntry_bytes_number,
42763 .decrement_key = direntry_decrement_key,
42764 .is_left_mergeable = direntry_is_left_mergeable,
42765 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42766 "Invalid item type observed, run fsck ASAP");
42767 }
42768
42769 -static struct item_operations errcatch_ops = {
42770 +static const struct item_operations errcatch_ops = {
42771 errcatch_bytes_number,
42772 errcatch_decrement_key,
42773 errcatch_is_left_mergeable,
42774 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42775 #error Item types must use disk-format assigned values.
42776 #endif
42777
42778 -struct item_operations *item_ops[TYPE_ANY + 1] = {
42779 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42780 &stat_data_ops,
42781 &indirect_ops,
42782 &direct_ops,
42783 diff -urNp linux-2.6.32.41/fs/reiserfs/journal.c linux-2.6.32.41/fs/reiserfs/journal.c
42784 --- linux-2.6.32.41/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42785 +++ linux-2.6.32.41/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42786 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42787 struct buffer_head *bh;
42788 int i, j;
42789
42790 + pax_track_stack();
42791 +
42792 bh = __getblk(dev, block, bufsize);
42793 if (buffer_uptodate(bh))
42794 return (bh);
42795 diff -urNp linux-2.6.32.41/fs/reiserfs/namei.c linux-2.6.32.41/fs/reiserfs/namei.c
42796 --- linux-2.6.32.41/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
42797 +++ linux-2.6.32.41/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
42798 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
42799 unsigned long savelink = 1;
42800 struct timespec ctime;
42801
42802 + pax_track_stack();
42803 +
42804 /* three balancings: (1) old name removal, (2) new name insertion
42805 and (3) maybe "save" link insertion
42806 stat data updates: (1) old directory,
42807 diff -urNp linux-2.6.32.41/fs/reiserfs/procfs.c linux-2.6.32.41/fs/reiserfs/procfs.c
42808 --- linux-2.6.32.41/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
42809 +++ linux-2.6.32.41/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
42810 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
42811 "SMALL_TAILS " : "NO_TAILS ",
42812 replay_only(sb) ? "REPLAY_ONLY " : "",
42813 convert_reiserfs(sb) ? "CONV " : "",
42814 - atomic_read(&r->s_generation_counter),
42815 + atomic_read_unchecked(&r->s_generation_counter),
42816 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
42817 SF(s_do_balance), SF(s_unneeded_left_neighbor),
42818 SF(s_good_search_by_key_reada), SF(s_bmaps),
42819 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
42820 struct journal_params *jp = &rs->s_v1.s_journal;
42821 char b[BDEVNAME_SIZE];
42822
42823 + pax_track_stack();
42824 +
42825 seq_printf(m, /* on-disk fields */
42826 "jp_journal_1st_block: \t%i\n"
42827 "jp_journal_dev: \t%s[%x]\n"
42828 diff -urNp linux-2.6.32.41/fs/reiserfs/stree.c linux-2.6.32.41/fs/reiserfs/stree.c
42829 --- linux-2.6.32.41/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
42830 +++ linux-2.6.32.41/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
42831 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
42832 int iter = 0;
42833 #endif
42834
42835 + pax_track_stack();
42836 +
42837 BUG_ON(!th->t_trans_id);
42838
42839 init_tb_struct(th, &s_del_balance, sb, path,
42840 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
42841 int retval;
42842 int quota_cut_bytes = 0;
42843
42844 + pax_track_stack();
42845 +
42846 BUG_ON(!th->t_trans_id);
42847
42848 le_key2cpu_key(&cpu_key, key);
42849 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
42850 int quota_cut_bytes;
42851 loff_t tail_pos = 0;
42852
42853 + pax_track_stack();
42854 +
42855 BUG_ON(!th->t_trans_id);
42856
42857 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
42858 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
42859 int retval;
42860 int fs_gen;
42861
42862 + pax_track_stack();
42863 +
42864 BUG_ON(!th->t_trans_id);
42865
42866 fs_gen = get_generation(inode->i_sb);
42867 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
42868 int fs_gen = 0;
42869 int quota_bytes = 0;
42870
42871 + pax_track_stack();
42872 +
42873 BUG_ON(!th->t_trans_id);
42874
42875 if (inode) { /* Do we count quotas for item? */
42876 diff -urNp linux-2.6.32.41/fs/reiserfs/super.c linux-2.6.32.41/fs/reiserfs/super.c
42877 --- linux-2.6.32.41/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
42878 +++ linux-2.6.32.41/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
42879 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
42880 {.option_name = NULL}
42881 };
42882
42883 + pax_track_stack();
42884 +
42885 *blocks = 0;
42886 if (!options || !*options)
42887 /* use default configuration: create tails, journaling on, no
42888 diff -urNp linux-2.6.32.41/fs/select.c linux-2.6.32.41/fs/select.c
42889 --- linux-2.6.32.41/fs/select.c 2011-03-27 14:31:47.000000000 -0400
42890 +++ linux-2.6.32.41/fs/select.c 2011-05-16 21:46:57.000000000 -0400
42891 @@ -20,6 +20,7 @@
42892 #include <linux/module.h>
42893 #include <linux/slab.h>
42894 #include <linux/poll.h>
42895 +#include <linux/security.h>
42896 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
42897 #include <linux/file.h>
42898 #include <linux/fdtable.h>
42899 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
42900 int retval, i, timed_out = 0;
42901 unsigned long slack = 0;
42902
42903 + pax_track_stack();
42904 +
42905 rcu_read_lock();
42906 retval = max_select_fd(n, fds);
42907 rcu_read_unlock();
42908 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
42909 /* Allocate small arguments on the stack to save memory and be faster */
42910 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
42911
42912 + pax_track_stack();
42913 +
42914 ret = -EINVAL;
42915 if (n < 0)
42916 goto out_nofds;
42917 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
42918 struct poll_list *walk = head;
42919 unsigned long todo = nfds;
42920
42921 + pax_track_stack();
42922 +
42923 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
42924 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42925 return -EINVAL;
42926
42927 diff -urNp linux-2.6.32.41/fs/seq_file.c linux-2.6.32.41/fs/seq_file.c
42928 --- linux-2.6.32.41/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
42929 +++ linux-2.6.32.41/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
42930 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
42931 return 0;
42932 }
42933 if (!m->buf) {
42934 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
42935 + m->size = PAGE_SIZE;
42936 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
42937 if (!m->buf)
42938 return -ENOMEM;
42939 }
42940 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
42941 Eoverflow:
42942 m->op->stop(m, p);
42943 kfree(m->buf);
42944 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
42945 + m->size <<= 1;
42946 + m->buf = kmalloc(m->size, GFP_KERNEL);
42947 return !m->buf ? -ENOMEM : -EAGAIN;
42948 }
42949
42950 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
42951 m->version = file->f_version;
42952 /* grab buffer if we didn't have one */
42953 if (!m->buf) {
42954 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
42955 + m->size = PAGE_SIZE;
42956 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
42957 if (!m->buf)
42958 goto Enomem;
42959 }
42960 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
42961 goto Fill;
42962 m->op->stop(m, p);
42963 kfree(m->buf);
42964 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
42965 + m->size <<= 1;
42966 + m->buf = kmalloc(m->size, GFP_KERNEL);
42967 if (!m->buf)
42968 goto Enomem;
42969 m->count = 0;
42970 diff -urNp linux-2.6.32.41/fs/smbfs/symlink.c linux-2.6.32.41/fs/smbfs/symlink.c
42971 --- linux-2.6.32.41/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
42972 +++ linux-2.6.32.41/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
42973 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
42974
42975 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42976 {
42977 - char *s = nd_get_link(nd);
42978 + const char *s = nd_get_link(nd);
42979 if (!IS_ERR(s))
42980 __putname(s);
42981 }
42982 diff -urNp linux-2.6.32.41/fs/splice.c linux-2.6.32.41/fs/splice.c
42983 --- linux-2.6.32.41/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
42984 +++ linux-2.6.32.41/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
42985 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
42986 pipe_lock(pipe);
42987
42988 for (;;) {
42989 - if (!pipe->readers) {
42990 + if (!atomic_read(&pipe->readers)) {
42991 send_sig(SIGPIPE, current, 0);
42992 if (!ret)
42993 ret = -EPIPE;
42994 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
42995 do_wakeup = 0;
42996 }
42997
42998 - pipe->waiting_writers++;
42999 + atomic_inc(&pipe->waiting_writers);
43000 pipe_wait(pipe);
43001 - pipe->waiting_writers--;
43002 + atomic_dec(&pipe->waiting_writers);
43003 }
43004
43005 pipe_unlock(pipe);
43006 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43007 .spd_release = spd_release_page,
43008 };
43009
43010 + pax_track_stack();
43011 +
43012 index = *ppos >> PAGE_CACHE_SHIFT;
43013 loff = *ppos & ~PAGE_CACHE_MASK;
43014 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43015 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43016 old_fs = get_fs();
43017 set_fs(get_ds());
43018 /* The cast to a user pointer is valid due to the set_fs() */
43019 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43020 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43021 set_fs(old_fs);
43022
43023 return res;
43024 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43025 old_fs = get_fs();
43026 set_fs(get_ds());
43027 /* The cast to a user pointer is valid due to the set_fs() */
43028 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43029 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43030 set_fs(old_fs);
43031
43032 return res;
43033 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43034 .spd_release = spd_release_page,
43035 };
43036
43037 + pax_track_stack();
43038 +
43039 index = *ppos >> PAGE_CACHE_SHIFT;
43040 offset = *ppos & ~PAGE_CACHE_MASK;
43041 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43042 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43043 goto err;
43044
43045 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43046 - vec[i].iov_base = (void __user *) page_address(page);
43047 + vec[i].iov_base = (__force void __user *) page_address(page);
43048 vec[i].iov_len = this_len;
43049 pages[i] = page;
43050 spd.nr_pages++;
43051 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43052 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43053 {
43054 while (!pipe->nrbufs) {
43055 - if (!pipe->writers)
43056 + if (!atomic_read(&pipe->writers))
43057 return 0;
43058
43059 - if (!pipe->waiting_writers && sd->num_spliced)
43060 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43061 return 0;
43062
43063 if (sd->flags & SPLICE_F_NONBLOCK)
43064 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43065 * out of the pipe right after the splice_to_pipe(). So set
43066 * PIPE_READERS appropriately.
43067 */
43068 - pipe->readers = 1;
43069 + atomic_set(&pipe->readers, 1);
43070
43071 current->splice_pipe = pipe;
43072 }
43073 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43074 .spd_release = spd_release_page,
43075 };
43076
43077 + pax_track_stack();
43078 +
43079 pipe = pipe_info(file->f_path.dentry->d_inode);
43080 if (!pipe)
43081 return -EBADF;
43082 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43083 ret = -ERESTARTSYS;
43084 break;
43085 }
43086 - if (!pipe->writers)
43087 + if (!atomic_read(&pipe->writers))
43088 break;
43089 - if (!pipe->waiting_writers) {
43090 + if (!atomic_read(&pipe->waiting_writers)) {
43091 if (flags & SPLICE_F_NONBLOCK) {
43092 ret = -EAGAIN;
43093 break;
43094 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43095 pipe_lock(pipe);
43096
43097 while (pipe->nrbufs >= PIPE_BUFFERS) {
43098 - if (!pipe->readers) {
43099 + if (!atomic_read(&pipe->readers)) {
43100 send_sig(SIGPIPE, current, 0);
43101 ret = -EPIPE;
43102 break;
43103 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43104 ret = -ERESTARTSYS;
43105 break;
43106 }
43107 - pipe->waiting_writers++;
43108 + atomic_inc(&pipe->waiting_writers);
43109 pipe_wait(pipe);
43110 - pipe->waiting_writers--;
43111 + atomic_dec(&pipe->waiting_writers);
43112 }
43113
43114 pipe_unlock(pipe);
43115 @@ -1785,14 +1791,14 @@ retry:
43116 pipe_double_lock(ipipe, opipe);
43117
43118 do {
43119 - if (!opipe->readers) {
43120 + if (!atomic_read(&opipe->readers)) {
43121 send_sig(SIGPIPE, current, 0);
43122 if (!ret)
43123 ret = -EPIPE;
43124 break;
43125 }
43126
43127 - if (!ipipe->nrbufs && !ipipe->writers)
43128 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43129 break;
43130
43131 /*
43132 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43133 pipe_double_lock(ipipe, opipe);
43134
43135 do {
43136 - if (!opipe->readers) {
43137 + if (!atomic_read(&opipe->readers)) {
43138 send_sig(SIGPIPE, current, 0);
43139 if (!ret)
43140 ret = -EPIPE;
43141 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43142 * return EAGAIN if we have the potential of some data in the
43143 * future, otherwise just return 0
43144 */
43145 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43146 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43147 ret = -EAGAIN;
43148
43149 pipe_unlock(ipipe);
43150 diff -urNp linux-2.6.32.41/fs/sysfs/file.c linux-2.6.32.41/fs/sysfs/file.c
43151 --- linux-2.6.32.41/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43152 +++ linux-2.6.32.41/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43153 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43154
43155 struct sysfs_open_dirent {
43156 atomic_t refcnt;
43157 - atomic_t event;
43158 + atomic_unchecked_t event;
43159 wait_queue_head_t poll;
43160 struct list_head buffers; /* goes through sysfs_buffer.list */
43161 };
43162 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43163 size_t count;
43164 loff_t pos;
43165 char * page;
43166 - struct sysfs_ops * ops;
43167 + const struct sysfs_ops * ops;
43168 struct mutex mutex;
43169 int needs_read_fill;
43170 int event;
43171 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43172 {
43173 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43174 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43175 - struct sysfs_ops * ops = buffer->ops;
43176 + const struct sysfs_ops * ops = buffer->ops;
43177 int ret = 0;
43178 ssize_t count;
43179
43180 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43181 if (!sysfs_get_active_two(attr_sd))
43182 return -ENODEV;
43183
43184 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43185 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43186 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43187
43188 sysfs_put_active_two(attr_sd);
43189 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43190 {
43191 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43192 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43193 - struct sysfs_ops * ops = buffer->ops;
43194 + const struct sysfs_ops * ops = buffer->ops;
43195 int rc;
43196
43197 /* need attr_sd for attr and ops, its parent for kobj */
43198 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43199 return -ENOMEM;
43200
43201 atomic_set(&new_od->refcnt, 0);
43202 - atomic_set(&new_od->event, 1);
43203 + atomic_set_unchecked(&new_od->event, 1);
43204 init_waitqueue_head(&new_od->poll);
43205 INIT_LIST_HEAD(&new_od->buffers);
43206 goto retry;
43207 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43208 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43209 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43210 struct sysfs_buffer *buffer;
43211 - struct sysfs_ops *ops;
43212 + const struct sysfs_ops *ops;
43213 int error = -EACCES;
43214 char *p;
43215
43216 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43217
43218 sysfs_put_active_two(attr_sd);
43219
43220 - if (buffer->event != atomic_read(&od->event))
43221 + if (buffer->event != atomic_read_unchecked(&od->event))
43222 goto trigger;
43223
43224 return DEFAULT_POLLMASK;
43225 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43226
43227 od = sd->s_attr.open;
43228 if (od) {
43229 - atomic_inc(&od->event);
43230 + atomic_inc_unchecked(&od->event);
43231 wake_up_interruptible(&od->poll);
43232 }
43233
43234 diff -urNp linux-2.6.32.41/fs/sysfs/mount.c linux-2.6.32.41/fs/sysfs/mount.c
43235 --- linux-2.6.32.41/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43236 +++ linux-2.6.32.41/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43237 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43238 .s_name = "",
43239 .s_count = ATOMIC_INIT(1),
43240 .s_flags = SYSFS_DIR,
43241 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43242 + .s_mode = S_IFDIR | S_IRWXU,
43243 +#else
43244 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43245 +#endif
43246 .s_ino = 1,
43247 };
43248
43249 diff -urNp linux-2.6.32.41/fs/sysfs/symlink.c linux-2.6.32.41/fs/sysfs/symlink.c
43250 --- linux-2.6.32.41/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43251 +++ linux-2.6.32.41/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43252 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43253
43254 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43255 {
43256 - char *page = nd_get_link(nd);
43257 + const char *page = nd_get_link(nd);
43258 if (!IS_ERR(page))
43259 free_page((unsigned long)page);
43260 }
43261 diff -urNp linux-2.6.32.41/fs/udf/balloc.c linux-2.6.32.41/fs/udf/balloc.c
43262 --- linux-2.6.32.41/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43263 +++ linux-2.6.32.41/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43264 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43265
43266 mutex_lock(&sbi->s_alloc_mutex);
43267 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43268 - if (bloc->logicalBlockNum < 0 ||
43269 - (bloc->logicalBlockNum + count) >
43270 - partmap->s_partition_len) {
43271 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43272 udf_debug("%d < %d || %d + %d > %d\n",
43273 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43274 count, partmap->s_partition_len);
43275 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43276
43277 mutex_lock(&sbi->s_alloc_mutex);
43278 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43279 - if (bloc->logicalBlockNum < 0 ||
43280 - (bloc->logicalBlockNum + count) >
43281 - partmap->s_partition_len) {
43282 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43283 udf_debug("%d < %d || %d + %d > %d\n",
43284 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43285 partmap->s_partition_len);
43286 diff -urNp linux-2.6.32.41/fs/udf/inode.c linux-2.6.32.41/fs/udf/inode.c
43287 --- linux-2.6.32.41/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43288 +++ linux-2.6.32.41/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43289 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43290 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43291 int lastblock = 0;
43292
43293 + pax_track_stack();
43294 +
43295 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43296 prev_epos.block = iinfo->i_location;
43297 prev_epos.bh = NULL;
43298 diff -urNp linux-2.6.32.41/fs/udf/misc.c linux-2.6.32.41/fs/udf/misc.c
43299 --- linux-2.6.32.41/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43300 +++ linux-2.6.32.41/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43301 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43302
43303 u8 udf_tag_checksum(const struct tag *t)
43304 {
43305 - u8 *data = (u8 *)t;
43306 + const u8 *data = (const u8 *)t;
43307 u8 checksum = 0;
43308 int i;
43309 for (i = 0; i < sizeof(struct tag); ++i)
43310 diff -urNp linux-2.6.32.41/fs/utimes.c linux-2.6.32.41/fs/utimes.c
43311 --- linux-2.6.32.41/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43312 +++ linux-2.6.32.41/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43313 @@ -1,6 +1,7 @@
43314 #include <linux/compiler.h>
43315 #include <linux/file.h>
43316 #include <linux/fs.h>
43317 +#include <linux/security.h>
43318 #include <linux/linkage.h>
43319 #include <linux/mount.h>
43320 #include <linux/namei.h>
43321 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43322 goto mnt_drop_write_and_out;
43323 }
43324 }
43325 +
43326 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43327 + error = -EACCES;
43328 + goto mnt_drop_write_and_out;
43329 + }
43330 +
43331 mutex_lock(&inode->i_mutex);
43332 error = notify_change(path->dentry, &newattrs);
43333 mutex_unlock(&inode->i_mutex);
43334 diff -urNp linux-2.6.32.41/fs/xattr_acl.c linux-2.6.32.41/fs/xattr_acl.c
43335 --- linux-2.6.32.41/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43336 +++ linux-2.6.32.41/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43337 @@ -17,8 +17,8 @@
43338 struct posix_acl *
43339 posix_acl_from_xattr(const void *value, size_t size)
43340 {
43341 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43342 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43343 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43344 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43345 int count;
43346 struct posix_acl *acl;
43347 struct posix_acl_entry *acl_e;
43348 diff -urNp linux-2.6.32.41/fs/xattr.c linux-2.6.32.41/fs/xattr.c
43349 --- linux-2.6.32.41/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43350 +++ linux-2.6.32.41/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43351 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43352 * Extended attribute SET operations
43353 */
43354 static long
43355 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43356 +setxattr(struct path *path, const char __user *name, const void __user *value,
43357 size_t size, int flags)
43358 {
43359 int error;
43360 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43361 return PTR_ERR(kvalue);
43362 }
43363
43364 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43365 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43366 + error = -EACCES;
43367 + goto out;
43368 + }
43369 +
43370 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43371 +out:
43372 kfree(kvalue);
43373 return error;
43374 }
43375 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43376 return error;
43377 error = mnt_want_write(path.mnt);
43378 if (!error) {
43379 - error = setxattr(path.dentry, name, value, size, flags);
43380 + error = setxattr(&path, name, value, size, flags);
43381 mnt_drop_write(path.mnt);
43382 }
43383 path_put(&path);
43384 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43385 return error;
43386 error = mnt_want_write(path.mnt);
43387 if (!error) {
43388 - error = setxattr(path.dentry, name, value, size, flags);
43389 + error = setxattr(&path, name, value, size, flags);
43390 mnt_drop_write(path.mnt);
43391 }
43392 path_put(&path);
43393 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43394 const void __user *,value, size_t, size, int, flags)
43395 {
43396 struct file *f;
43397 - struct dentry *dentry;
43398 int error = -EBADF;
43399
43400 f = fget(fd);
43401 if (!f)
43402 return error;
43403 - dentry = f->f_path.dentry;
43404 - audit_inode(NULL, dentry);
43405 + audit_inode(NULL, f->f_path.dentry);
43406 error = mnt_want_write_file(f);
43407 if (!error) {
43408 - error = setxattr(dentry, name, value, size, flags);
43409 + error = setxattr(&f->f_path, name, value, size, flags);
43410 mnt_drop_write(f->f_path.mnt);
43411 }
43412 fput(f);
43413 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c
43414 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43415 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43416 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43417 xfs_fsop_geom_t fsgeo;
43418 int error;
43419
43420 + memset(&fsgeo, 0, sizeof(fsgeo));
43421 error = xfs_fs_geometry(mp, &fsgeo, 3);
43422 if (error)
43423 return -error;
43424 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c
43425 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43426 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43427 @@ -134,7 +134,7 @@ xfs_find_handle(
43428 }
43429
43430 error = -EFAULT;
43431 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43432 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43433 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43434 goto out_put;
43435
43436 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43437 if (IS_ERR(dentry))
43438 return PTR_ERR(dentry);
43439
43440 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43441 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43442 if (!kbuf)
43443 goto out_dput;
43444
43445 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43446 xfs_mount_t *mp,
43447 void __user *arg)
43448 {
43449 - xfs_fsop_geom_t fsgeo;
43450 + xfs_fsop_geom_t fsgeo;
43451 int error;
43452
43453 error = xfs_fs_geometry(mp, &fsgeo, 3);
43454 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c
43455 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43456 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43457 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43458 struct nameidata *nd,
43459 void *p)
43460 {
43461 - char *s = nd_get_link(nd);
43462 + const char *s = nd_get_link(nd);
43463
43464 if (!IS_ERR(s))
43465 kfree(s);
43466 diff -urNp linux-2.6.32.41/fs/xfs/xfs_bmap.c linux-2.6.32.41/fs/xfs/xfs_bmap.c
43467 --- linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43468 +++ linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43469 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43470 int nmap,
43471 int ret_nmap);
43472 #else
43473 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43474 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43475 #endif /* DEBUG */
43476
43477 #if defined(XFS_RW_TRACE)
43478 diff -urNp linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c
43479 --- linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43480 +++ linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43481 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43482 }
43483
43484 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43485 - if (filldir(dirent, sfep->name, sfep->namelen,
43486 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43487 + char name[sfep->namelen];
43488 + memcpy(name, sfep->name, sfep->namelen);
43489 + if (filldir(dirent, name, sfep->namelen,
43490 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43491 + *offset = off & 0x7fffffff;
43492 + return 0;
43493 + }
43494 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43495 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43496 *offset = off & 0x7fffffff;
43497 return 0;
43498 diff -urNp linux-2.6.32.41/grsecurity/gracl_alloc.c linux-2.6.32.41/grsecurity/gracl_alloc.c
43499 --- linux-2.6.32.41/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43500 +++ linux-2.6.32.41/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43501 @@ -0,0 +1,105 @@
43502 +#include <linux/kernel.h>
43503 +#include <linux/mm.h>
43504 +#include <linux/slab.h>
43505 +#include <linux/vmalloc.h>
43506 +#include <linux/gracl.h>
43507 +#include <linux/grsecurity.h>
43508 +
43509 +static unsigned long alloc_stack_next = 1;
43510 +static unsigned long alloc_stack_size = 1;
43511 +static void **alloc_stack;
43512 +
43513 +static __inline__ int
43514 +alloc_pop(void)
43515 +{
43516 + if (alloc_stack_next == 1)
43517 + return 0;
43518 +
43519 + kfree(alloc_stack[alloc_stack_next - 2]);
43520 +
43521 + alloc_stack_next--;
43522 +
43523 + return 1;
43524 +}
43525 +
43526 +static __inline__ int
43527 +alloc_push(void *buf)
43528 +{
43529 + if (alloc_stack_next >= alloc_stack_size)
43530 + return 1;
43531 +
43532 + alloc_stack[alloc_stack_next - 1] = buf;
43533 +
43534 + alloc_stack_next++;
43535 +
43536 + return 0;
43537 +}
43538 +
43539 +void *
43540 +acl_alloc(unsigned long len)
43541 +{
43542 + void *ret = NULL;
43543 +
43544 + if (!len || len > PAGE_SIZE)
43545 + goto out;
43546 +
43547 + ret = kmalloc(len, GFP_KERNEL);
43548 +
43549 + if (ret) {
43550 + if (alloc_push(ret)) {
43551 + kfree(ret);
43552 + ret = NULL;
43553 + }
43554 + }
43555 +
43556 +out:
43557 + return ret;
43558 +}
43559 +
43560 +void *
43561 +acl_alloc_num(unsigned long num, unsigned long len)
43562 +{
43563 + if (!len || (num > (PAGE_SIZE / len)))
43564 + return NULL;
43565 +
43566 + return acl_alloc(num * len);
43567 +}
43568 +
43569 +void
43570 +acl_free_all(void)
43571 +{
43572 + if (gr_acl_is_enabled() || !alloc_stack)
43573 + return;
43574 +
43575 + while (alloc_pop()) ;
43576 +
43577 + if (alloc_stack) {
43578 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43579 + kfree(alloc_stack);
43580 + else
43581 + vfree(alloc_stack);
43582 + }
43583 +
43584 + alloc_stack = NULL;
43585 + alloc_stack_size = 1;
43586 + alloc_stack_next = 1;
43587 +
43588 + return;
43589 +}
43590 +
43591 +int
43592 +acl_alloc_stack_init(unsigned long size)
43593 +{
43594 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43595 + alloc_stack =
43596 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43597 + else
43598 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43599 +
43600 + alloc_stack_size = size;
43601 +
43602 + if (!alloc_stack)
43603 + return 0;
43604 + else
43605 + return 1;
43606 +}
43607 diff -urNp linux-2.6.32.41/grsecurity/gracl.c linux-2.6.32.41/grsecurity/gracl.c
43608 --- linux-2.6.32.41/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43609 +++ linux-2.6.32.41/grsecurity/gracl.c 2011-05-24 20:26:07.000000000 -0400
43610 @@ -0,0 +1,4079 @@
43611 +#include <linux/kernel.h>
43612 +#include <linux/module.h>
43613 +#include <linux/sched.h>
43614 +#include <linux/mm.h>
43615 +#include <linux/file.h>
43616 +#include <linux/fs.h>
43617 +#include <linux/namei.h>
43618 +#include <linux/mount.h>
43619 +#include <linux/tty.h>
43620 +#include <linux/proc_fs.h>
43621 +#include <linux/smp_lock.h>
43622 +#include <linux/slab.h>
43623 +#include <linux/vmalloc.h>
43624 +#include <linux/types.h>
43625 +#include <linux/sysctl.h>
43626 +#include <linux/netdevice.h>
43627 +#include <linux/ptrace.h>
43628 +#include <linux/gracl.h>
43629 +#include <linux/gralloc.h>
43630 +#include <linux/grsecurity.h>
43631 +#include <linux/grinternal.h>
43632 +#include <linux/pid_namespace.h>
43633 +#include <linux/fdtable.h>
43634 +#include <linux/percpu.h>
43635 +
43636 +#include <asm/uaccess.h>
43637 +#include <asm/errno.h>
43638 +#include <asm/mman.h>
43639 +
43640 +static struct acl_role_db acl_role_set;
43641 +static struct name_db name_set;
43642 +static struct inodev_db inodev_set;
43643 +
43644 +/* for keeping track of userspace pointers used for subjects, so we
43645 + can share references in the kernel as well
43646 +*/
43647 +
43648 +static struct dentry *real_root;
43649 +static struct vfsmount *real_root_mnt;
43650 +
43651 +static struct acl_subj_map_db subj_map_set;
43652 +
43653 +static struct acl_role_label *default_role;
43654 +
43655 +static struct acl_role_label *role_list;
43656 +
43657 +static u16 acl_sp_role_value;
43658 +
43659 +extern char *gr_shared_page[4];
43660 +static DEFINE_MUTEX(gr_dev_mutex);
43661 +DEFINE_RWLOCK(gr_inode_lock);
43662 +
43663 +struct gr_arg *gr_usermode;
43664 +
43665 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
43666 +
43667 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43668 +extern void gr_clear_learn_entries(void);
43669 +
43670 +#ifdef CONFIG_GRKERNSEC_RESLOG
43671 +extern void gr_log_resource(const struct task_struct *task,
43672 + const int res, const unsigned long wanted, const int gt);
43673 +#endif
43674 +
43675 +unsigned char *gr_system_salt;
43676 +unsigned char *gr_system_sum;
43677 +
43678 +static struct sprole_pw **acl_special_roles = NULL;
43679 +static __u16 num_sprole_pws = 0;
43680 +
43681 +static struct acl_role_label *kernel_role = NULL;
43682 +
43683 +static unsigned int gr_auth_attempts = 0;
43684 +static unsigned long gr_auth_expires = 0UL;
43685 +
43686 +#ifdef CONFIG_NET
43687 +extern struct vfsmount *sock_mnt;
43688 +#endif
43689 +extern struct vfsmount *pipe_mnt;
43690 +extern struct vfsmount *shm_mnt;
43691 +#ifdef CONFIG_HUGETLBFS
43692 +extern struct vfsmount *hugetlbfs_vfsmount;
43693 +#endif
43694 +
43695 +static struct acl_object_label *fakefs_obj;
43696 +
43697 +extern int gr_init_uidset(void);
43698 +extern void gr_free_uidset(void);
43699 +extern void gr_remove_uid(uid_t uid);
43700 +extern int gr_find_uid(uid_t uid);
43701 +
43702 +__inline__ int
43703 +gr_acl_is_enabled(void)
43704 +{
43705 + return (gr_status & GR_READY);
43706 +}
43707 +
43708 +#ifdef CONFIG_BTRFS_FS
43709 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43710 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43711 +#endif
43712 +
43713 +static inline dev_t __get_dev(const struct dentry *dentry)
43714 +{
43715 +#ifdef CONFIG_BTRFS_FS
43716 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43717 + return get_btrfs_dev_from_inode(dentry->d_inode);
43718 + else
43719 +#endif
43720 + return dentry->d_inode->i_sb->s_dev;
43721 +}
43722 +
43723 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43724 +{
43725 + return __get_dev(dentry);
43726 +}
43727 +
43728 +static char gr_task_roletype_to_char(struct task_struct *task)
43729 +{
43730 + switch (task->role->roletype &
43731 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43732 + GR_ROLE_SPECIAL)) {
43733 + case GR_ROLE_DEFAULT:
43734 + return 'D';
43735 + case GR_ROLE_USER:
43736 + return 'U';
43737 + case GR_ROLE_GROUP:
43738 + return 'G';
43739 + case GR_ROLE_SPECIAL:
43740 + return 'S';
43741 + }
43742 +
43743 + return 'X';
43744 +}
43745 +
43746 +char gr_roletype_to_char(void)
43747 +{
43748 + return gr_task_roletype_to_char(current);
43749 +}
43750 +
43751 +__inline__ int
43752 +gr_acl_tpe_check(void)
43753 +{
43754 + if (unlikely(!(gr_status & GR_READY)))
43755 + return 0;
43756 + if (current->role->roletype & GR_ROLE_TPE)
43757 + return 1;
43758 + else
43759 + return 0;
43760 +}
43761 +
43762 +int
43763 +gr_handle_rawio(const struct inode *inode)
43764 +{
43765 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43766 + if (inode && S_ISBLK(inode->i_mode) &&
43767 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43768 + !capable(CAP_SYS_RAWIO))
43769 + return 1;
43770 +#endif
43771 + return 0;
43772 +}
43773 +
43774 +static int
43775 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43776 +{
43777 + if (likely(lena != lenb))
43778 + return 0;
43779 +
43780 + return !memcmp(a, b, lena);
43781 +}
43782 +
43783 +/* this must be called with vfsmount_lock and dcache_lock held */
43784 +
43785 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43786 + struct dentry *root, struct vfsmount *rootmnt,
43787 + char *buffer, int buflen)
43788 +{
43789 + char * end = buffer+buflen;
43790 + char * retval;
43791 + int namelen;
43792 +
43793 + *--end = '\0';
43794 + buflen--;
43795 +
43796 + if (buflen < 1)
43797 + goto Elong;
43798 + /* Get '/' right */
43799 + retval = end-1;
43800 + *retval = '/';
43801 +
43802 + for (;;) {
43803 + struct dentry * parent;
43804 +
43805 + if (dentry == root && vfsmnt == rootmnt)
43806 + break;
43807 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
43808 + /* Global root? */
43809 + if (vfsmnt->mnt_parent == vfsmnt)
43810 + goto global_root;
43811 + dentry = vfsmnt->mnt_mountpoint;
43812 + vfsmnt = vfsmnt->mnt_parent;
43813 + continue;
43814 + }
43815 + parent = dentry->d_parent;
43816 + prefetch(parent);
43817 + namelen = dentry->d_name.len;
43818 + buflen -= namelen + 1;
43819 + if (buflen < 0)
43820 + goto Elong;
43821 + end -= namelen;
43822 + memcpy(end, dentry->d_name.name, namelen);
43823 + *--end = '/';
43824 + retval = end;
43825 + dentry = parent;
43826 + }
43827 +
43828 +out:
43829 + return retval;
43830 +
43831 +global_root:
43832 + namelen = dentry->d_name.len;
43833 + buflen -= namelen;
43834 + if (buflen < 0)
43835 + goto Elong;
43836 + retval -= namelen-1; /* hit the slash */
43837 + memcpy(retval, dentry->d_name.name, namelen);
43838 + goto out;
43839 +Elong:
43840 + retval = ERR_PTR(-ENAMETOOLONG);
43841 + goto out;
43842 +}
43843 +
43844 +static char *
43845 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43846 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
43847 +{
43848 + char *retval;
43849 +
43850 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
43851 + if (unlikely(IS_ERR(retval)))
43852 + retval = strcpy(buf, "<path too long>");
43853 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
43854 + retval[1] = '\0';
43855 +
43856 + return retval;
43857 +}
43858 +
43859 +static char *
43860 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43861 + char *buf, int buflen)
43862 +{
43863 + char *res;
43864 +
43865 + /* we can use real_root, real_root_mnt, because this is only called
43866 + by the RBAC system */
43867 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
43868 +
43869 + return res;
43870 +}
43871 +
43872 +static char *
43873 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43874 + char *buf, int buflen)
43875 +{
43876 + char *res;
43877 + struct dentry *root;
43878 + struct vfsmount *rootmnt;
43879 + struct task_struct *reaper = &init_task;
43880 +
43881 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
43882 + read_lock(&reaper->fs->lock);
43883 + root = dget(reaper->fs->root.dentry);
43884 + rootmnt = mntget(reaper->fs->root.mnt);
43885 + read_unlock(&reaper->fs->lock);
43886 +
43887 + spin_lock(&dcache_lock);
43888 + spin_lock(&vfsmount_lock);
43889 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
43890 + spin_unlock(&vfsmount_lock);
43891 + spin_unlock(&dcache_lock);
43892 +
43893 + dput(root);
43894 + mntput(rootmnt);
43895 + return res;
43896 +}
43897 +
43898 +static char *
43899 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
43900 +{
43901 + char *ret;
43902 + spin_lock(&dcache_lock);
43903 + spin_lock(&vfsmount_lock);
43904 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43905 + PAGE_SIZE);
43906 + spin_unlock(&vfsmount_lock);
43907 + spin_unlock(&dcache_lock);
43908 + return ret;
43909 +}
43910 +
43911 +char *
43912 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
43913 +{
43914 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43915 + PAGE_SIZE);
43916 +}
43917 +
43918 +char *
43919 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
43920 +{
43921 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
43922 + PAGE_SIZE);
43923 +}
43924 +
43925 +char *
43926 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
43927 +{
43928 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
43929 + PAGE_SIZE);
43930 +}
43931 +
43932 +char *
43933 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
43934 +{
43935 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
43936 + PAGE_SIZE);
43937 +}
43938 +
43939 +char *
43940 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
43941 +{
43942 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
43943 + PAGE_SIZE);
43944 +}
43945 +
43946 +__inline__ __u32
43947 +to_gr_audit(const __u32 reqmode)
43948 +{
43949 + /* masks off auditable permission flags, then shifts them to create
43950 + auditing flags, and adds the special case of append auditing if
43951 + we're requesting write */
43952 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
43953 +}
43954 +
43955 +struct acl_subject_label *
43956 +lookup_subject_map(const struct acl_subject_label *userp)
43957 +{
43958 + unsigned int index = shash(userp, subj_map_set.s_size);
43959 + struct subject_map *match;
43960 +
43961 + match = subj_map_set.s_hash[index];
43962 +
43963 + while (match && match->user != userp)
43964 + match = match->next;
43965 +
43966 + if (match != NULL)
43967 + return match->kernel;
43968 + else
43969 + return NULL;
43970 +}
43971 +
43972 +static void
43973 +insert_subj_map_entry(struct subject_map *subjmap)
43974 +{
43975 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
43976 + struct subject_map **curr;
43977 +
43978 + subjmap->prev = NULL;
43979 +
43980 + curr = &subj_map_set.s_hash[index];
43981 + if (*curr != NULL)
43982 + (*curr)->prev = subjmap;
43983 +
43984 + subjmap->next = *curr;
43985 + *curr = subjmap;
43986 +
43987 + return;
43988 +}
43989 +
43990 +static struct acl_role_label *
43991 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
43992 + const gid_t gid)
43993 +{
43994 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
43995 + struct acl_role_label *match;
43996 + struct role_allowed_ip *ipp;
43997 + unsigned int x;
43998 + u32 curr_ip = task->signal->curr_ip;
43999 +
44000 + task->signal->saved_ip = curr_ip;
44001 +
44002 + match = acl_role_set.r_hash[index];
44003 +
44004 + while (match) {
44005 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44006 + for (x = 0; x < match->domain_child_num; x++) {
44007 + if (match->domain_children[x] == uid)
44008 + goto found;
44009 + }
44010 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44011 + break;
44012 + match = match->next;
44013 + }
44014 +found:
44015 + if (match == NULL) {
44016 + try_group:
44017 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44018 + match = acl_role_set.r_hash[index];
44019 +
44020 + while (match) {
44021 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44022 + for (x = 0; x < match->domain_child_num; x++) {
44023 + if (match->domain_children[x] == gid)
44024 + goto found2;
44025 + }
44026 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44027 + break;
44028 + match = match->next;
44029 + }
44030 +found2:
44031 + if (match == NULL)
44032 + match = default_role;
44033 + if (match->allowed_ips == NULL)
44034 + return match;
44035 + else {
44036 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44037 + if (likely
44038 + ((ntohl(curr_ip) & ipp->netmask) ==
44039 + (ntohl(ipp->addr) & ipp->netmask)))
44040 + return match;
44041 + }
44042 + match = default_role;
44043 + }
44044 + } else if (match->allowed_ips == NULL) {
44045 + return match;
44046 + } else {
44047 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44048 + if (likely
44049 + ((ntohl(curr_ip) & ipp->netmask) ==
44050 + (ntohl(ipp->addr) & ipp->netmask)))
44051 + return match;
44052 + }
44053 + goto try_group;
44054 + }
44055 +
44056 + return match;
44057 +}
44058 +
44059 +struct acl_subject_label *
44060 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44061 + const struct acl_role_label *role)
44062 +{
44063 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44064 + struct acl_subject_label *match;
44065 +
44066 + match = role->subj_hash[index];
44067 +
44068 + while (match && (match->inode != ino || match->device != dev ||
44069 + (match->mode & GR_DELETED))) {
44070 + match = match->next;
44071 + }
44072 +
44073 + if (match && !(match->mode & GR_DELETED))
44074 + return match;
44075 + else
44076 + return NULL;
44077 +}
44078 +
44079 +struct acl_subject_label *
44080 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44081 + const struct acl_role_label *role)
44082 +{
44083 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44084 + struct acl_subject_label *match;
44085 +
44086 + match = role->subj_hash[index];
44087 +
44088 + while (match && (match->inode != ino || match->device != dev ||
44089 + !(match->mode & GR_DELETED))) {
44090 + match = match->next;
44091 + }
44092 +
44093 + if (match && (match->mode & GR_DELETED))
44094 + return match;
44095 + else
44096 + return NULL;
44097 +}
44098 +
44099 +static struct acl_object_label *
44100 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44101 + const struct acl_subject_label *subj)
44102 +{
44103 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44104 + struct acl_object_label *match;
44105 +
44106 + match = subj->obj_hash[index];
44107 +
44108 + while (match && (match->inode != ino || match->device != dev ||
44109 + (match->mode & GR_DELETED))) {
44110 + match = match->next;
44111 + }
44112 +
44113 + if (match && !(match->mode & GR_DELETED))
44114 + return match;
44115 + else
44116 + return NULL;
44117 +}
44118 +
44119 +static struct acl_object_label *
44120 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44121 + const struct acl_subject_label *subj)
44122 +{
44123 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44124 + struct acl_object_label *match;
44125 +
44126 + match = subj->obj_hash[index];
44127 +
44128 + while (match && (match->inode != ino || match->device != dev ||
44129 + !(match->mode & GR_DELETED))) {
44130 + match = match->next;
44131 + }
44132 +
44133 + if (match && (match->mode & GR_DELETED))
44134 + return match;
44135 +
44136 + match = subj->obj_hash[index];
44137 +
44138 + while (match && (match->inode != ino || match->device != dev ||
44139 + (match->mode & GR_DELETED))) {
44140 + match = match->next;
44141 + }
44142 +
44143 + if (match && !(match->mode & GR_DELETED))
44144 + return match;
44145 + else
44146 + return NULL;
44147 +}
44148 +
44149 +static struct name_entry *
44150 +lookup_name_entry(const char *name)
44151 +{
44152 + unsigned int len = strlen(name);
44153 + unsigned int key = full_name_hash(name, len);
44154 + unsigned int index = key % name_set.n_size;
44155 + struct name_entry *match;
44156 +
44157 + match = name_set.n_hash[index];
44158 +
44159 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44160 + match = match->next;
44161 +
44162 + return match;
44163 +}
44164 +
44165 +static struct name_entry *
44166 +lookup_name_entry_create(const char *name)
44167 +{
44168 + unsigned int len = strlen(name);
44169 + unsigned int key = full_name_hash(name, len);
44170 + unsigned int index = key % name_set.n_size;
44171 + struct name_entry *match;
44172 +
44173 + match = name_set.n_hash[index];
44174 +
44175 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44176 + !match->deleted))
44177 + match = match->next;
44178 +
44179 + if (match && match->deleted)
44180 + return match;
44181 +
44182 + match = name_set.n_hash[index];
44183 +
44184 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44185 + match->deleted))
44186 + match = match->next;
44187 +
44188 + if (match && !match->deleted)
44189 + return match;
44190 + else
44191 + return NULL;
44192 +}
44193 +
44194 +static struct inodev_entry *
44195 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44196 +{
44197 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44198 + struct inodev_entry *match;
44199 +
44200 + match = inodev_set.i_hash[index];
44201 +
44202 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44203 + match = match->next;
44204 +
44205 + return match;
44206 +}
44207 +
44208 +static void
44209 +insert_inodev_entry(struct inodev_entry *entry)
44210 +{
44211 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44212 + inodev_set.i_size);
44213 + struct inodev_entry **curr;
44214 +
44215 + entry->prev = NULL;
44216 +
44217 + curr = &inodev_set.i_hash[index];
44218 + if (*curr != NULL)
44219 + (*curr)->prev = entry;
44220 +
44221 + entry->next = *curr;
44222 + *curr = entry;
44223 +
44224 + return;
44225 +}
44226 +
44227 +static void
44228 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44229 +{
44230 + unsigned int index =
44231 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44232 + struct acl_role_label **curr;
44233 + struct acl_role_label *tmp;
44234 +
44235 + curr = &acl_role_set.r_hash[index];
44236 +
44237 + /* if role was already inserted due to domains and already has
44238 + a role in the same bucket as it attached, then we need to
44239 + combine these two buckets
44240 + */
44241 + if (role->next) {
44242 + tmp = role->next;
44243 + while (tmp->next)
44244 + tmp = tmp->next;
44245 + tmp->next = *curr;
44246 + } else
44247 + role->next = *curr;
44248 + *curr = role;
44249 +
44250 + return;
44251 +}
44252 +
44253 +static void
44254 +insert_acl_role_label(struct acl_role_label *role)
44255 +{
44256 + int i;
44257 +
44258 + if (role_list == NULL) {
44259 + role_list = role;
44260 + role->prev = NULL;
44261 + } else {
44262 + role->prev = role_list;
44263 + role_list = role;
44264 + }
44265 +
44266 + /* used for hash chains */
44267 + role->next = NULL;
44268 +
44269 + if (role->roletype & GR_ROLE_DOMAIN) {
44270 + for (i = 0; i < role->domain_child_num; i++)
44271 + __insert_acl_role_label(role, role->domain_children[i]);
44272 + } else
44273 + __insert_acl_role_label(role, role->uidgid);
44274 +}
44275 +
44276 +static int
44277 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44278 +{
44279 + struct name_entry **curr, *nentry;
44280 + struct inodev_entry *ientry;
44281 + unsigned int len = strlen(name);
44282 + unsigned int key = full_name_hash(name, len);
44283 + unsigned int index = key % name_set.n_size;
44284 +
44285 + curr = &name_set.n_hash[index];
44286 +
44287 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44288 + curr = &((*curr)->next);
44289 +
44290 + if (*curr != NULL)
44291 + return 1;
44292 +
44293 + nentry = acl_alloc(sizeof (struct name_entry));
44294 + if (nentry == NULL)
44295 + return 0;
44296 + ientry = acl_alloc(sizeof (struct inodev_entry));
44297 + if (ientry == NULL)
44298 + return 0;
44299 + ientry->nentry = nentry;
44300 +
44301 + nentry->key = key;
44302 + nentry->name = name;
44303 + nentry->inode = inode;
44304 + nentry->device = device;
44305 + nentry->len = len;
44306 + nentry->deleted = deleted;
44307 +
44308 + nentry->prev = NULL;
44309 + curr = &name_set.n_hash[index];
44310 + if (*curr != NULL)
44311 + (*curr)->prev = nentry;
44312 + nentry->next = *curr;
44313 + *curr = nentry;
44314 +
44315 + /* insert us into the table searchable by inode/dev */
44316 + insert_inodev_entry(ientry);
44317 +
44318 + return 1;
44319 +}
44320 +
44321 +static void
44322 +insert_acl_obj_label(struct acl_object_label *obj,
44323 + struct acl_subject_label *subj)
44324 +{
44325 + unsigned int index =
44326 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44327 + struct acl_object_label **curr;
44328 +
44329 +
44330 + obj->prev = NULL;
44331 +
44332 + curr = &subj->obj_hash[index];
44333 + if (*curr != NULL)
44334 + (*curr)->prev = obj;
44335 +
44336 + obj->next = *curr;
44337 + *curr = obj;
44338 +
44339 + return;
44340 +}
44341 +
44342 +static void
44343 +insert_acl_subj_label(struct acl_subject_label *obj,
44344 + struct acl_role_label *role)
44345 +{
44346 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44347 + struct acl_subject_label **curr;
44348 +
44349 + obj->prev = NULL;
44350 +
44351 + curr = &role->subj_hash[index];
44352 + if (*curr != NULL)
44353 + (*curr)->prev = obj;
44354 +
44355 + obj->next = *curr;
44356 + *curr = obj;
44357 +
44358 + return;
44359 +}
44360 +
44361 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44362 +
44363 +static void *
44364 +create_table(__u32 * len, int elementsize)
44365 +{
44366 + unsigned int table_sizes[] = {
44367 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44368 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44369 + 4194301, 8388593, 16777213, 33554393, 67108859
44370 + };
44371 + void *newtable = NULL;
44372 + unsigned int pwr = 0;
44373 +
44374 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44375 + table_sizes[pwr] <= *len)
44376 + pwr++;
44377 +
44378 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44379 + return newtable;
44380 +
44381 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44382 + newtable =
44383 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44384 + else
44385 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44386 +
44387 + *len = table_sizes[pwr];
44388 +
44389 + return newtable;
44390 +}
44391 +
44392 +static int
44393 +init_variables(const struct gr_arg *arg)
44394 +{
44395 + struct task_struct *reaper = &init_task;
44396 + unsigned int stacksize;
44397 +
44398 + subj_map_set.s_size = arg->role_db.num_subjects;
44399 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44400 + name_set.n_size = arg->role_db.num_objects;
44401 + inodev_set.i_size = arg->role_db.num_objects;
44402 +
44403 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44404 + !name_set.n_size || !inodev_set.i_size)
44405 + return 1;
44406 +
44407 + if (!gr_init_uidset())
44408 + return 1;
44409 +
44410 + /* set up the stack that holds allocation info */
44411 +
44412 + stacksize = arg->role_db.num_pointers + 5;
44413 +
44414 + if (!acl_alloc_stack_init(stacksize))
44415 + return 1;
44416 +
44417 + /* grab reference for the real root dentry and vfsmount */
44418 + read_lock(&reaper->fs->lock);
44419 + real_root = dget(reaper->fs->root.dentry);
44420 + real_root_mnt = mntget(reaper->fs->root.mnt);
44421 + read_unlock(&reaper->fs->lock);
44422 +
44423 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44424 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44425 +#endif
44426 +
44427 + fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
44428 + if (fakefs_obj == NULL)
44429 + return 1;
44430 + fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44431 +
44432 + subj_map_set.s_hash =
44433 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44434 + acl_role_set.r_hash =
44435 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44436 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44437 + inodev_set.i_hash =
44438 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44439 +
44440 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44441 + !name_set.n_hash || !inodev_set.i_hash)
44442 + return 1;
44443 +
44444 + memset(subj_map_set.s_hash, 0,
44445 + sizeof(struct subject_map *) * subj_map_set.s_size);
44446 + memset(acl_role_set.r_hash, 0,
44447 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44448 + memset(name_set.n_hash, 0,
44449 + sizeof (struct name_entry *) * name_set.n_size);
44450 + memset(inodev_set.i_hash, 0,
44451 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44452 +
44453 + return 0;
44454 +}
44455 +
44456 +/* free information not needed after startup
44457 + currently contains user->kernel pointer mappings for subjects
44458 +*/
44459 +
44460 +static void
44461 +free_init_variables(void)
44462 +{
44463 + __u32 i;
44464 +
44465 + if (subj_map_set.s_hash) {
44466 + for (i = 0; i < subj_map_set.s_size; i++) {
44467 + if (subj_map_set.s_hash[i]) {
44468 + kfree(subj_map_set.s_hash[i]);
44469 + subj_map_set.s_hash[i] = NULL;
44470 + }
44471 + }
44472 +
44473 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44474 + PAGE_SIZE)
44475 + kfree(subj_map_set.s_hash);
44476 + else
44477 + vfree(subj_map_set.s_hash);
44478 + }
44479 +
44480 + return;
44481 +}
44482 +
44483 +static void
44484 +free_variables(void)
44485 +{
44486 + struct acl_subject_label *s;
44487 + struct acl_role_label *r;
44488 + struct task_struct *task, *task2;
44489 + unsigned int x;
44490 +
44491 + gr_clear_learn_entries();
44492 +
44493 + read_lock(&tasklist_lock);
44494 + do_each_thread(task2, task) {
44495 + task->acl_sp_role = 0;
44496 + task->acl_role_id = 0;
44497 + task->acl = NULL;
44498 + task->role = NULL;
44499 + } while_each_thread(task2, task);
44500 + read_unlock(&tasklist_lock);
44501 +
44502 + /* release the reference to the real root dentry and vfsmount */
44503 + if (real_root)
44504 + dput(real_root);
44505 + real_root = NULL;
44506 + if (real_root_mnt)
44507 + mntput(real_root_mnt);
44508 + real_root_mnt = NULL;
44509 +
44510 + /* free all object hash tables */
44511 +
44512 + FOR_EACH_ROLE_START(r)
44513 + if (r->subj_hash == NULL)
44514 + goto next_role;
44515 + FOR_EACH_SUBJECT_START(r, s, x)
44516 + if (s->obj_hash == NULL)
44517 + break;
44518 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44519 + kfree(s->obj_hash);
44520 + else
44521 + vfree(s->obj_hash);
44522 + FOR_EACH_SUBJECT_END(s, x)
44523 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44524 + if (s->obj_hash == NULL)
44525 + break;
44526 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44527 + kfree(s->obj_hash);
44528 + else
44529 + vfree(s->obj_hash);
44530 + FOR_EACH_NESTED_SUBJECT_END(s)
44531 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44532 + kfree(r->subj_hash);
44533 + else
44534 + vfree(r->subj_hash);
44535 + r->subj_hash = NULL;
44536 +next_role:
44537 + FOR_EACH_ROLE_END(r)
44538 +
44539 + acl_free_all();
44540 +
44541 + if (acl_role_set.r_hash) {
44542 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44543 + PAGE_SIZE)
44544 + kfree(acl_role_set.r_hash);
44545 + else
44546 + vfree(acl_role_set.r_hash);
44547 + }
44548 + if (name_set.n_hash) {
44549 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44550 + PAGE_SIZE)
44551 + kfree(name_set.n_hash);
44552 + else
44553 + vfree(name_set.n_hash);
44554 + }
44555 +
44556 + if (inodev_set.i_hash) {
44557 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44558 + PAGE_SIZE)
44559 + kfree(inodev_set.i_hash);
44560 + else
44561 + vfree(inodev_set.i_hash);
44562 + }
44563 +
44564 + gr_free_uidset();
44565 +
44566 + memset(&name_set, 0, sizeof (struct name_db));
44567 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44568 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44569 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44570 +
44571 + default_role = NULL;
44572 + role_list = NULL;
44573 +
44574 + return;
44575 +}
44576 +
44577 +static __u32
44578 +count_user_objs(struct acl_object_label *userp)
44579 +{
44580 + struct acl_object_label o_tmp;
44581 + __u32 num = 0;
44582 +
44583 + while (userp) {
44584 + if (copy_from_user(&o_tmp, userp,
44585 + sizeof (struct acl_object_label)))
44586 + break;
44587 +
44588 + userp = o_tmp.prev;
44589 + num++;
44590 + }
44591 +
44592 + return num;
44593 +}
44594 +
44595 +static struct acl_subject_label *
44596 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44597 +
44598 +static int
44599 +copy_user_glob(struct acl_object_label *obj)
44600 +{
44601 + struct acl_object_label *g_tmp, **guser;
44602 + unsigned int len;
44603 + char *tmp;
44604 +
44605 + if (obj->globbed == NULL)
44606 + return 0;
44607 +
44608 + guser = &obj->globbed;
44609 + while (*guser) {
44610 + g_tmp = (struct acl_object_label *)
44611 + acl_alloc(sizeof (struct acl_object_label));
44612 + if (g_tmp == NULL)
44613 + return -ENOMEM;
44614 +
44615 + if (copy_from_user(g_tmp, *guser,
44616 + sizeof (struct acl_object_label)))
44617 + return -EFAULT;
44618 +
44619 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44620 +
44621 + if (!len || len >= PATH_MAX)
44622 + return -EINVAL;
44623 +
44624 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44625 + return -ENOMEM;
44626 +
44627 + if (copy_from_user(tmp, g_tmp->filename, len))
44628 + return -EFAULT;
44629 + tmp[len-1] = '\0';
44630 + g_tmp->filename = tmp;
44631 +
44632 + *guser = g_tmp;
44633 + guser = &(g_tmp->next);
44634 + }
44635 +
44636 + return 0;
44637 +}
44638 +
44639 +static int
44640 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44641 + struct acl_role_label *role)
44642 +{
44643 + struct acl_object_label *o_tmp;
44644 + unsigned int len;
44645 + int ret;
44646 + char *tmp;
44647 +
44648 + while (userp) {
44649 + if ((o_tmp = (struct acl_object_label *)
44650 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44651 + return -ENOMEM;
44652 +
44653 + if (copy_from_user(o_tmp, userp,
44654 + sizeof (struct acl_object_label)))
44655 + return -EFAULT;
44656 +
44657 + userp = o_tmp->prev;
44658 +
44659 + len = strnlen_user(o_tmp->filename, PATH_MAX);
44660 +
44661 + if (!len || len >= PATH_MAX)
44662 + return -EINVAL;
44663 +
44664 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44665 + return -ENOMEM;
44666 +
44667 + if (copy_from_user(tmp, o_tmp->filename, len))
44668 + return -EFAULT;
44669 + tmp[len-1] = '\0';
44670 + o_tmp->filename = tmp;
44671 +
44672 + insert_acl_obj_label(o_tmp, subj);
44673 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44674 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44675 + return -ENOMEM;
44676 +
44677 + ret = copy_user_glob(o_tmp);
44678 + if (ret)
44679 + return ret;
44680 +
44681 + if (o_tmp->nested) {
44682 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44683 + if (IS_ERR(o_tmp->nested))
44684 + return PTR_ERR(o_tmp->nested);
44685 +
44686 + /* insert into nested subject list */
44687 + o_tmp->nested->next = role->hash->first;
44688 + role->hash->first = o_tmp->nested;
44689 + }
44690 + }
44691 +
44692 + return 0;
44693 +}
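+/* the pattern above recurs throughout the copy routines: the userspace list
+   is walked via its ->prev links, each node is copied with copy_from_user(),
+   and every embedded string is re-copied into kernel memory after being
+   bounded by strnlen_user() and explicitly NUL-terminated.  Nested subjects
+   found on objects are copied recursively through do_copy_user_subj() and
+   chained onto role->hash->first
+*/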
44694 +
44695 +static __u32
44696 +count_user_subjs(struct acl_subject_label *userp)
44697 +{
44698 + struct acl_subject_label s_tmp;
44699 + __u32 num = 0;
44700 +
44701 + while (userp) {
44702 + if (copy_from_user(&s_tmp, userp,
44703 + sizeof (struct acl_subject_label)))
44704 + break;
44705 +
44706 + userp = s_tmp.prev;
44707 + /* do not count nested subjects against this count, since
44708 + they are not included in the hash table, but are
44709 + attached to objects. We have already counted
44710 + the subjects in userspace for the allocation
44711 + stack
44712 + */
44713 + if (!(s_tmp.mode & GR_NESTED))
44714 + num++;
44715 + }
44716 +
44717 + return num;
44718 +}
44719 +
44720 +static int
44721 +copy_user_allowedips(struct acl_role_label *rolep)
44722 +{
44723 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44724 +
44725 + ruserip = rolep->allowed_ips;
44726 +
44727 + while (ruserip) {
44728 + rlast = rtmp;
44729 +
44730 + if ((rtmp = (struct role_allowed_ip *)
44731 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44732 + return -ENOMEM;
44733 +
44734 + if (copy_from_user(rtmp, ruserip,
44735 + sizeof (struct role_allowed_ip)))
44736 + return -EFAULT;
44737 +
44738 + ruserip = rtmp->prev;
44739 +
44740 + if (!rlast) {
44741 + rtmp->prev = NULL;
44742 + rolep->allowed_ips = rtmp;
44743 + } else {
44744 + rlast->next = rtmp;
44745 + rtmp->prev = rlast;
44746 + }
44747 +
44748 + if (!ruserip)
44749 + rtmp->next = NULL;
44750 + }
44751 +
44752 + return 0;
44753 +}
44754 +
44755 +static int
44756 +copy_user_transitions(struct acl_role_label *rolep)
44757 +{
44758 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
44759 +
44760 + unsigned int len;
44761 + char *tmp;
44762 +
44763 + rusertp = rolep->transitions;
44764 +
44765 + while (rusertp) {
44766 + rlast = rtmp;
44767 +
44768 + if ((rtmp = (struct role_transition *)
44769 + acl_alloc(sizeof (struct role_transition))) == NULL)
44770 + return -ENOMEM;
44771 +
44772 + if (copy_from_user(rtmp, rusertp,
44773 + sizeof (struct role_transition)))
44774 + return -EFAULT;
44775 +
44776 + rusertp = rtmp->prev;
44777 +
44778 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44779 +
44780 + if (!len || len >= GR_SPROLE_LEN)
44781 + return -EINVAL;
44782 +
44783 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44784 + return -ENOMEM;
44785 +
44786 + if (copy_from_user(tmp, rtmp->rolename, len))
44787 + return -EFAULT;
44788 + tmp[len-1] = '\0';
44789 + rtmp->rolename = tmp;
44790 +
44791 + if (!rlast) {
44792 + rtmp->prev = NULL;
44793 + rolep->transitions = rtmp;
44794 + } else {
44795 + rlast->next = rtmp;
44796 + rtmp->prev = rlast;
44797 + }
44798 +
44799 + if (!rusertp)
44800 + rtmp->next = NULL;
44801 + }
44802 +
44803 + return 0;
44804 +}
44805 +
44806 +static struct acl_subject_label *
44807 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
44808 +{
44809 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
44810 + unsigned int len;
44811 + char *tmp;
44812 + __u32 num_objs;
44813 + struct acl_ip_label **i_tmp, *i_utmp2;
44814 + struct gr_hash_struct ghash;
44815 + struct subject_map *subjmap;
44816 + unsigned int i_num;
44817 + int err;
44818 +
44819 + s_tmp = lookup_subject_map(userp);
44820 +
44821 + /* we've already copied this subject into the kernel, just return
44822 + the reference to it, and don't copy it over again
44823 + */
44824 + if (s_tmp)
44825 + return(s_tmp);
44826 +
44827 + if ((s_tmp = (struct acl_subject_label *)
44828 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
44829 + return ERR_PTR(-ENOMEM);
44830 +
44831 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
44832 + if (subjmap == NULL)
44833 + return ERR_PTR(-ENOMEM);
44834 +
44835 + subjmap->user = userp;
44836 + subjmap->kernel = s_tmp;
44837 + insert_subj_map_entry(subjmap);
44838 +
44839 + if (copy_from_user(s_tmp, userp,
44840 + sizeof (struct acl_subject_label)))
44841 + return ERR_PTR(-EFAULT);
44842 +
44843 + len = strnlen_user(s_tmp->filename, PATH_MAX);
44844 +
44845 + if (!len || len >= PATH_MAX)
44846 + return ERR_PTR(-EINVAL);
44847 +
44848 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44849 + return ERR_PTR(-ENOMEM);
44850 +
44851 + if (copy_from_user(tmp, s_tmp->filename, len))
44852 + return ERR_PTR(-EFAULT);
44853 + tmp[len-1] = '\0';
44854 + s_tmp->filename = tmp;
44855 +
44856 + if (!strcmp(s_tmp->filename, "/"))
44857 + role->root_label = s_tmp;
44858 +
44859 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
44860 + return ERR_PTR(-EFAULT);
44861 +
44862 + /* copy user and group transition tables */
44863 +
44864 + if (s_tmp->user_trans_num) {
44865 + uid_t *uidlist;
44866 +
44867 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
44868 + if (uidlist == NULL)
44869 + return ERR_PTR(-ENOMEM);
44870 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
44871 + return ERR_PTR(-EFAULT);
44872 +
44873 + s_tmp->user_transitions = uidlist;
44874 + }
44875 +
44876 + if (s_tmp->group_trans_num) {
44877 + gid_t *gidlist;
44878 +
44879 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
44880 + if (gidlist == NULL)
44881 + return ERR_PTR(-ENOMEM);
44882 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
44883 + return ERR_PTR(-EFAULT);
44884 +
44885 + s_tmp->group_transitions = gidlist;
44886 + }
44887 +
44888 + /* set up object hash table */
44889 + num_objs = count_user_objs(ghash.first);
44890 +
44891 + s_tmp->obj_hash_size = num_objs;
44892 + s_tmp->obj_hash =
44893 + (struct acl_object_label **)
44894 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
44895 +
44896 + if (!s_tmp->obj_hash)
44897 + return ERR_PTR(-ENOMEM);
44898 +
44899 + memset(s_tmp->obj_hash, 0,
44900 + s_tmp->obj_hash_size *
44901 + sizeof (struct acl_object_label *));
44902 +
44903 + /* add in objects */
44904 + err = copy_user_objs(ghash.first, s_tmp, role);
44905 +
44906 + if (err)
44907 + return ERR_PTR(err);
44908 +
44909 + /* set pointer for parent subject */
44910 + if (s_tmp->parent_subject) {
44911 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
44912 +
44913 + if (IS_ERR(s_tmp2))
44914 + return s_tmp2;
44915 +
44916 + s_tmp->parent_subject = s_tmp2;
44917 + }
44918 +
44919 + /* add in ip acls */
44920 +
44921 + if (!s_tmp->ip_num) {
44922 + s_tmp->ips = NULL;
44923 + goto insert;
44924 + }
44925 +
44926 + i_tmp =
44927 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
44928 + sizeof (struct acl_ip_label *));
44929 +
44930 + if (!i_tmp)
44931 + return ERR_PTR(-ENOMEM);
44932 +
44933 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
44934 + *(i_tmp + i_num) =
44935 + (struct acl_ip_label *)
44936 + acl_alloc(sizeof (struct acl_ip_label));
44937 + if (!*(i_tmp + i_num))
44938 + return ERR_PTR(-ENOMEM);
44939 +
44940 + if (copy_from_user
44941 + (&i_utmp2, s_tmp->ips + i_num,
44942 + sizeof (struct acl_ip_label *)))
44943 + return ERR_PTR(-EFAULT);
44944 +
44945 + if (copy_from_user
44946 + (*(i_tmp + i_num), i_utmp2,
44947 + sizeof (struct acl_ip_label)))
44948 + return ERR_PTR(-EFAULT);
44949 +
44950 + if ((*(i_tmp + i_num))->iface == NULL)
44951 + continue;
44952 +
44953 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
44954 + if (!len || len >= IFNAMSIZ)
44955 + return ERR_PTR(-EINVAL);
44956 + tmp = acl_alloc(len);
44957 + if (tmp == NULL)
44958 + return ERR_PTR(-ENOMEM);
44959 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
44960 + return ERR_PTR(-EFAULT);
44961 + (*(i_tmp + i_num))->iface = tmp;
44962 + }
44963 +
44964 + s_tmp->ips = i_tmp;
44965 +
44966 +insert:
44967 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
44968 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
44969 + return ERR_PTR(-ENOMEM);
44970 +
44971 + return s_tmp;
44972 +}
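+/* do_copy_user_subj() returns an ERR_PTR() on failure; subjects that were
+   already copied are found through lookup_subject_map() and returned as-is,
+   so a subject referenced more than once (e.g. as a parent subject) is only
+   copied into the kernel once
+*/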
44973 +
44974 +static int
44975 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
44976 +{
44977 + struct acl_subject_label s_pre;
44978 + struct acl_subject_label * ret;
44979 + int err;
44980 +
44981 + while (userp) {
44982 + if (copy_from_user(&s_pre, userp,
44983 + sizeof (struct acl_subject_label)))
44984 + return -EFAULT;
44985 +
44986 + /* do not add nested subjects here, add
44987 + while parsing objects
44988 + */
44989 +
44990 + if (s_pre.mode & GR_NESTED) {
44991 + userp = s_pre.prev;
44992 + continue;
44993 + }
44994 +
44995 + ret = do_copy_user_subj(userp, role);
44996 +
44997 + err = PTR_ERR(ret);
44998 + if (IS_ERR(ret))
44999 + return err;
45000 +
45001 + insert_acl_subj_label(ret, role);
45002 +
45003 + userp = s_pre.prev;
45004 + }
45005 +
45006 + return 0;
45007 +}
45008 +
45009 +static int
45010 +copy_user_acl(struct gr_arg *arg)
45011 +{
45012 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45013 + struct sprole_pw *sptmp;
45014 + struct gr_hash_struct *ghash;
45015 + uid_t *domainlist;
45016 + unsigned int r_num;
45017 + unsigned int len;
45018 + char *tmp;
45019 + int err = 0;
45020 + __u16 i;
45021 + __u32 num_subjs;
45022 +
45023 + /* we need a default and kernel role */
45024 + if (arg->role_db.num_roles < 2)
45025 + return -EINVAL;
45026 +
45027 + /* copy special role authentication info from userspace */
45028 +
45029 + num_sprole_pws = arg->num_sprole_pws;
45030 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45031 +
45032 + if (!acl_special_roles) {
45033 + err = -ENOMEM;
45034 + goto cleanup;
45035 + }
45036 +
45037 + for (i = 0; i < num_sprole_pws; i++) {
45038 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45039 + if (!sptmp) {
45040 + err = -ENOMEM;
45041 + goto cleanup;
45042 + }
45043 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45044 + sizeof (struct sprole_pw))) {
45045 + err = -EFAULT;
45046 + goto cleanup;
45047 + }
45048 +
45049 + len =
45050 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45051 +
45052 + if (!len || len >= GR_SPROLE_LEN) {
45053 + err = -EINVAL;
45054 + goto cleanup;
45055 + }
45056 +
45057 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45058 + err = -ENOMEM;
45059 + goto cleanup;
45060 + }
45061 +
45062 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45063 + err = -EFAULT;
45064 + goto cleanup;
45065 + }
45066 + tmp[len-1] = '\0';
45067 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45068 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45069 +#endif
45070 + sptmp->rolename = tmp;
45071 + acl_special_roles[i] = sptmp;
45072 + }
45073 +
45074 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45075 +
45076 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45077 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45078 +
45079 + if (!r_tmp) {
45080 + err = -ENOMEM;
45081 + goto cleanup;
45082 + }
45083 +
45084 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45085 + sizeof (struct acl_role_label *))) {
45086 + err = -EFAULT;
45087 + goto cleanup;
45088 + }
45089 +
45090 + if (copy_from_user(r_tmp, r_utmp2,
45091 + sizeof (struct acl_role_label))) {
45092 + err = -EFAULT;
45093 + goto cleanup;
45094 + }
45095 +
45096 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45097 +
45098 +		if (!len || len >= GR_SPROLE_LEN) {
45099 + err = -EINVAL;
45100 + goto cleanup;
45101 + }
45102 +
45103 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45104 + err = -ENOMEM;
45105 + goto cleanup;
45106 + }
45107 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45108 + err = -EFAULT;
45109 + goto cleanup;
45110 + }
45111 + tmp[len-1] = '\0';
45112 + r_tmp->rolename = tmp;
45113 +
45114 + if (!strcmp(r_tmp->rolename, "default")
45115 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45116 + default_role = r_tmp;
45117 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45118 + kernel_role = r_tmp;
45119 + }
45120 +
45121 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45122 + err = -ENOMEM;
45123 + goto cleanup;
45124 + }
45125 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45126 + err = -EFAULT;
45127 + goto cleanup;
45128 + }
45129 +
45130 + r_tmp->hash = ghash;
45131 +
45132 + num_subjs = count_user_subjs(r_tmp->hash->first);
45133 +
45134 + r_tmp->subj_hash_size = num_subjs;
45135 + r_tmp->subj_hash =
45136 + (struct acl_subject_label **)
45137 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45138 +
45139 + if (!r_tmp->subj_hash) {
45140 + err = -ENOMEM;
45141 + goto cleanup;
45142 + }
45143 +
45144 + err = copy_user_allowedips(r_tmp);
45145 + if (err)
45146 + goto cleanup;
45147 +
45148 + /* copy domain info */
45149 + if (r_tmp->domain_children != NULL) {
45150 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45151 + if (domainlist == NULL) {
45152 + err = -ENOMEM;
45153 + goto cleanup;
45154 + }
45155 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45156 + err = -EFAULT;
45157 + goto cleanup;
45158 + }
45159 + r_tmp->domain_children = domainlist;
45160 + }
45161 +
45162 + err = copy_user_transitions(r_tmp);
45163 + if (err)
45164 + goto cleanup;
45165 +
45166 + memset(r_tmp->subj_hash, 0,
45167 + r_tmp->subj_hash_size *
45168 + sizeof (struct acl_subject_label *));
45169 +
45170 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45171 +
45172 + if (err)
45173 + goto cleanup;
45174 +
45175 + /* set nested subject list to null */
45176 + r_tmp->hash->first = NULL;
45177 +
45178 + insert_acl_role_label(r_tmp);
45179 + }
45180 +
45181 + goto return_err;
45182 + cleanup:
45183 + free_variables();
45184 + return_err:
45185 + return err;
45186 +
45187 +}
45188 +
45189 +static int
45190 +gracl_init(struct gr_arg *args)
45191 +{
45192 + int error = 0;
45193 +
45194 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45195 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45196 +
45197 + if (init_variables(args)) {
45198 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45199 + error = -ENOMEM;
45200 + free_variables();
45201 + goto out;
45202 + }
45203 +
45204 + error = copy_user_acl(args);
45205 + free_init_variables();
45206 + if (error) {
45207 + free_variables();
45208 + goto out;
45209 + }
45210 +
45211 + if ((error = gr_set_acls(0))) {
45212 + free_variables();
45213 + goto out;
45214 + }
45215 +
45216 + pax_open_kernel();
45217 + gr_status |= GR_READY;
45218 + pax_close_kernel();
45219 +
45220 + out:
45221 + return error;
45222 +}
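+/* gracl_init() above is the top-level policy load: it copies the supplied
+   salt and hash into gr_system_salt/gr_system_sum, builds the in-kernel
+   tables via init_variables() and copy_user_acl(), frees the temporary
+   user->kernel subject mappings, applies the ACLs with gr_set_acls(0), and
+   only then marks the RBAC system enabled by setting GR_READY under
+   pax_open_kernel()/pax_close_kernel()
+*/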
45223 +
45224 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45225 +
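+/* e.g. glob_match("/tmp/sess_*", "/tmp/sess_abc123") returns 0 (match),
+   while glob_match("/tmp/sess_?", "/tmp/sess_abc") returns 1 (no match):
+   '?' matches exactly one character other than '/', and a '*' that is not
+   the last pattern character never matches across a '/'
+*/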
45226 +static int
45227 +glob_match(const char *p, const char *n)
45228 +{
45229 + char c;
45230 +
45231 + while ((c = *p++) != '\0') {
45232 + switch (c) {
45233 + case '?':
45234 + if (*n == '\0')
45235 + return 1;
45236 + else if (*n == '/')
45237 + return 1;
45238 + break;
45239 + case '\\':
45240 + if (*n != c)
45241 + return 1;
45242 + break;
45243 + case '*':
45244 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45245 + if (*n == '/')
45246 + return 1;
45247 + else if (c == '?') {
45248 + if (*n == '\0')
45249 + return 1;
45250 + else
45251 + ++n;
45252 + }
45253 + }
45254 + if (c == '\0') {
45255 + return 0;
45256 + } else {
45257 + const char *endp;
45258 +
45259 + if ((endp = strchr(n, '/')) == NULL)
45260 + endp = n + strlen(n);
45261 +
45262 + if (c == '[') {
45263 + for (--p; n < endp; ++n)
45264 + if (!glob_match(p, n))
45265 + return 0;
45266 + } else if (c == '/') {
45267 + while (*n != '\0' && *n != '/')
45268 + ++n;
45269 + if (*n == '/' && !glob_match(p, n + 1))
45270 + return 0;
45271 + } else {
45272 + for (--p; n < endp; ++n)
45273 + if (*n == c && !glob_match(p, n))
45274 + return 0;
45275 + }
45276 +
45277 + return 1;
45278 + }
45279 + case '[':
45280 + {
45281 + int not;
45282 + char cold;
45283 +
45284 + if (*n == '\0' || *n == '/')
45285 + return 1;
45286 +
45287 + not = (*p == '!' || *p == '^');
45288 + if (not)
45289 + ++p;
45290 +
45291 + c = *p++;
45292 + for (;;) {
45293 + unsigned char fn = (unsigned char)*n;
45294 +
45295 + if (c == '\0')
45296 + return 1;
45297 + else {
45298 + if (c == fn)
45299 + goto matched;
45300 + cold = c;
45301 + c = *p++;
45302 +
45303 + if (c == '-' && *p != ']') {
45304 + unsigned char cend = *p++;
45305 +
45306 + if (cend == '\0')
45307 + return 1;
45308 +
45309 + if (cold <= fn && fn <= cend)
45310 + goto matched;
45311 +
45312 + c = *p++;
45313 + }
45314 + }
45315 +
45316 + if (c == ']')
45317 + break;
45318 + }
45319 + if (!not)
45320 + return 1;
45321 + break;
45322 + matched:
45323 + while (c != ']') {
45324 + if (c == '\0')
45325 + return 1;
45326 +
45327 + c = *p++;
45328 + }
45329 + if (not)
45330 + return 1;
45331 + }
45332 + break;
45333 + default:
45334 + if (c != *n)
45335 + return 1;
45336 + }
45337 +
45338 + ++n;
45339 + }
45340 +
45341 + if (*n == '\0')
45342 + return 0;
45343 +
45344 + if (*n == '/')
45345 + return 0;
45346 +
45347 + return 1;
45348 +}
45349 +
45350 +static struct acl_object_label *
45351 +chk_glob_label(struct acl_object_label *globbed,
45352 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45353 +{
45354 + struct acl_object_label *tmp;
45355 +
45356 + if (*path == NULL)
45357 + *path = gr_to_filename_nolock(dentry, mnt);
45358 +
45359 + tmp = globbed;
45360 +
45361 + while (tmp) {
45362 + if (!glob_match(tmp->filename, *path))
45363 + return tmp;
45364 + tmp = tmp->next;
45365 + }
45366 +
45367 + return NULL;
45368 +}
45369 +
45370 +static struct acl_object_label *
45371 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45372 + const ino_t curr_ino, const dev_t curr_dev,
45373 + const struct acl_subject_label *subj, char **path, const int checkglob)
45374 +{
45375 + struct acl_subject_label *tmpsubj;
45376 + struct acl_object_label *retval;
45377 + struct acl_object_label *retval2;
45378 +
45379 + tmpsubj = (struct acl_subject_label *) subj;
45380 + read_lock(&gr_inode_lock);
45381 + do {
45382 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45383 + if (retval) {
45384 + if (checkglob && retval->globbed) {
45385 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45386 + (struct vfsmount *)orig_mnt, path);
45387 + if (retval2)
45388 + retval = retval2;
45389 + }
45390 + break;
45391 + }
45392 + } while ((tmpsubj = tmpsubj->parent_subject));
45393 + read_unlock(&gr_inode_lock);
45394 +
45395 + return retval;
45396 +}
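+/* __full_lookup() above walks from the given subject up its parent_subject
+   chain under gr_inode_lock until one of the subjects has an object label
+   for the inode/device pair; if that label carries glob entries and glob
+   checking was requested, chk_glob_label() may refine the match
+*/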
45397 +
45398 +static __inline__ struct acl_object_label *
45399 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45400 + const struct dentry *curr_dentry,
45401 + const struct acl_subject_label *subj, char **path, const int checkglob)
45402 +{
45403 + int newglob = checkglob;
45404 +
45405 +	/* if we aren't checking a subdirectory of the original path yet, skip glob checking,
45406 +	   as we don't want a / * rule to match instead of the / object.
45407 +	   create lookups that call this function are the exception: they look up
45408 +	   on the parent and thus need globbing checks on all paths
45409 +	*/
45410 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45411 + newglob = GR_NO_GLOB;
45412 +
45413 + return __full_lookup(orig_dentry, orig_mnt,
45414 + curr_dentry->d_inode->i_ino,
45415 + __get_dev(curr_dentry), subj, path, newglob);
45416 +}
45417 +
45418 +static struct acl_object_label *
45419 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45420 + const struct acl_subject_label *subj, char *path, const int checkglob)
45421 +{
45422 + struct dentry *dentry = (struct dentry *) l_dentry;
45423 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45424 + struct acl_object_label *retval;
45425 +
45426 + spin_lock(&dcache_lock);
45427 + spin_lock(&vfsmount_lock);
45428 +
45429 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45430 +#ifdef CONFIG_NET
45431 + mnt == sock_mnt ||
45432 +#endif
45433 +#ifdef CONFIG_HUGETLBFS
45434 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45435 +#endif
45436 + /* ignore Eric Biederman */
45437 + IS_PRIVATE(l_dentry->d_inode))) {
45438 + retval = fakefs_obj;
45439 + goto out;
45440 + }
45441 +
45442 + for (;;) {
45443 + if (dentry == real_root && mnt == real_root_mnt)
45444 + break;
45445 +
45446 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45447 + if (mnt->mnt_parent == mnt)
45448 + break;
45449 +
45450 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45451 + if (retval != NULL)
45452 + goto out;
45453 +
45454 + dentry = mnt->mnt_mountpoint;
45455 + mnt = mnt->mnt_parent;
45456 + continue;
45457 + }
45458 +
45459 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45460 + if (retval != NULL)
45461 + goto out;
45462 +
45463 + dentry = dentry->d_parent;
45464 + }
45465 +
45466 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45467 +
45468 + if (retval == NULL)
45469 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45470 +out:
45471 + spin_unlock(&vfsmount_lock);
45472 + spin_unlock(&dcache_lock);
45473 +
45474 + BUG_ON(retval == NULL);
45475 +
45476 + return retval;
45477 +}
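+/* the loop above resolves an object label by walking the dentry chain toward
+   the root: d_parent within a mount, crossing to mnt_mountpoint/mnt_parent at
+   a mount root, and stopping at the cached real_root.  Unlinked shm and
+   hugetlbfs inodes, pipes, sockets and private inodes short-circuit to the
+   permissive fakefs_obj; if nothing matches, the real root's label is used
+*/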
45478 +
45479 +static __inline__ struct acl_object_label *
45480 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45481 + const struct acl_subject_label *subj)
45482 +{
45483 + char *path = NULL;
45484 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45485 +}
45486 +
45487 +static __inline__ struct acl_object_label *
45488 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45489 + const struct acl_subject_label *subj)
45490 +{
45491 + char *path = NULL;
45492 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45493 +}
45494 +
45495 +static __inline__ struct acl_object_label *
45496 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45497 + const struct acl_subject_label *subj, char *path)
45498 +{
45499 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45500 +}
45501 +
45502 +static struct acl_subject_label *
45503 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45504 + const struct acl_role_label *role)
45505 +{
45506 + struct dentry *dentry = (struct dentry *) l_dentry;
45507 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45508 + struct acl_subject_label *retval;
45509 +
45510 + spin_lock(&dcache_lock);
45511 + spin_lock(&vfsmount_lock);
45512 +
45513 + for (;;) {
45514 + if (dentry == real_root && mnt == real_root_mnt)
45515 + break;
45516 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45517 + if (mnt->mnt_parent == mnt)
45518 + break;
45519 +
45520 + read_lock(&gr_inode_lock);
45521 + retval =
45522 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45523 + __get_dev(dentry), role);
45524 + read_unlock(&gr_inode_lock);
45525 + if (retval != NULL)
45526 + goto out;
45527 +
45528 + dentry = mnt->mnt_mountpoint;
45529 + mnt = mnt->mnt_parent;
45530 + continue;
45531 + }
45532 +
45533 + read_lock(&gr_inode_lock);
45534 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45535 + __get_dev(dentry), role);
45536 + read_unlock(&gr_inode_lock);
45537 + if (retval != NULL)
45538 + goto out;
45539 +
45540 + dentry = dentry->d_parent;
45541 + }
45542 +
45543 + read_lock(&gr_inode_lock);
45544 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45545 + __get_dev(dentry), role);
45546 + read_unlock(&gr_inode_lock);
45547 +
45548 + if (unlikely(retval == NULL)) {
45549 + read_lock(&gr_inode_lock);
45550 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45551 + __get_dev(real_root), role);
45552 + read_unlock(&gr_inode_lock);
45553 + }
45554 +out:
45555 + spin_unlock(&vfsmount_lock);
45556 + spin_unlock(&dcache_lock);
45557 +
45558 + BUG_ON(retval == NULL);
45559 +
45560 + return retval;
45561 +}
45562 +
45563 +static void
45564 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45565 +{
45566 + struct task_struct *task = current;
45567 + const struct cred *cred = current_cred();
45568 +
45569 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45570 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45571 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45572 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45573 +
45574 + return;
45575 +}
45576 +
45577 +static void
45578 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45579 +{
45580 + struct task_struct *task = current;
45581 + const struct cred *cred = current_cred();
45582 +
45583 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45584 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45585 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45586 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45587 +
45588 + return;
45589 +}
45590 +
45591 +static void
45592 +gr_log_learn_id_change(const char type, const unsigned int real,
45593 + const unsigned int effective, const unsigned int fs)
45594 +{
45595 + struct task_struct *task = current;
45596 + const struct cred *cred = current_cred();
45597 +
45598 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45599 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45600 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45601 + type, real, effective, fs, &task->signal->saved_ip);
45602 +
45603 + return;
45604 +}
45605 +
45606 +__u32
45607 +gr_check_link(const struct dentry * new_dentry,
45608 + const struct dentry * parent_dentry,
45609 + const struct vfsmount * parent_mnt,
45610 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45611 +{
45612 + struct acl_object_label *obj;
45613 + __u32 oldmode, newmode;
45614 + __u32 needmode;
45615 +
45616 + if (unlikely(!(gr_status & GR_READY)))
45617 + return (GR_CREATE | GR_LINK);
45618 +
45619 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45620 + oldmode = obj->mode;
45621 +
45622 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45623 + oldmode |= (GR_CREATE | GR_LINK);
45624 +
45625 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45626 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45627 + needmode |= GR_SETID | GR_AUDIT_SETID;
45628 +
45629 + newmode =
45630 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45631 + oldmode | needmode);
45632 +
45633 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45634 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45635 + GR_INHERIT | GR_AUDIT_INHERIT);
45636 +
45637 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45638 + goto bad;
45639 +
45640 + if ((oldmode & needmode) != needmode)
45641 + goto bad;
45642 +
45643 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45644 + if ((newmode & needmode) != needmode)
45645 + goto bad;
45646 +
45647 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45648 + return newmode;
45649 +bad:
45650 + needmode = oldmode;
45651 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45652 + needmode |= GR_SETID;
45653 +
45654 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45655 + gr_log_learn(old_dentry, old_mnt, needmode);
45656 + return (GR_CREATE | GR_LINK);
45657 + } else if (newmode & GR_SUPPRESS)
45658 + return GR_SUPPRESS;
45659 + else
45660 + return 0;
45661 +}
45662 +
45663 +__u32
45664 +gr_search_file(const struct dentry * dentry, const __u32 mode,
45665 + const struct vfsmount * mnt)
45666 +{
45667 + __u32 retval = mode;
45668 + struct acl_subject_label *curracl;
45669 + struct acl_object_label *currobj;
45670 +
45671 + if (unlikely(!(gr_status & GR_READY)))
45672 + return (mode & ~GR_AUDITS);
45673 +
45674 + curracl = current->acl;
45675 +
45676 + currobj = chk_obj_label(dentry, mnt, curracl);
45677 + retval = currobj->mode & mode;
45678 +
45679 + /* if we're opening a specified transfer file for writing
45680 + (e.g. /dev/initctl), then transfer our role to init
45681 + */
45682 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45683 + current->role->roletype & GR_ROLE_PERSIST)) {
45684 + struct task_struct *task = init_pid_ns.child_reaper;
45685 +
45686 + if (task->role != current->role) {
45687 + task->acl_sp_role = 0;
45688 + task->acl_role_id = current->acl_role_id;
45689 + task->role = current->role;
45690 + rcu_read_lock();
45691 + read_lock(&grsec_exec_file_lock);
45692 + gr_apply_subject_to_task(task);
45693 + read_unlock(&grsec_exec_file_lock);
45694 + rcu_read_unlock();
45695 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45696 + }
45697 + }
45698 +
45699 + if (unlikely
45700 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45701 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45702 + __u32 new_mode = mode;
45703 +
45704 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45705 +
45706 + retval = new_mode;
45707 +
45708 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45709 + new_mode |= GR_INHERIT;
45710 +
45711 + if (!(mode & GR_NOLEARN))
45712 + gr_log_learn(dentry, mnt, new_mode);
45713 + }
45714 +
45715 + return retval;
45716 +}
45717 +
45718 +__u32
45719 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45720 + const struct vfsmount * mnt, const __u32 mode)
45721 +{
45722 + struct name_entry *match;
45723 + struct acl_object_label *matchpo;
45724 + struct acl_subject_label *curracl;
45725 + char *path;
45726 + __u32 retval;
45727 +
45728 + if (unlikely(!(gr_status & GR_READY)))
45729 + return (mode & ~GR_AUDITS);
45730 +
45731 + preempt_disable();
45732 + path = gr_to_filename_rbac(new_dentry, mnt);
45733 + match = lookup_name_entry_create(path);
45734 +
45735 + if (!match)
45736 + goto check_parent;
45737 +
45738 + curracl = current->acl;
45739 +
45740 + read_lock(&gr_inode_lock);
45741 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45742 + read_unlock(&gr_inode_lock);
45743 +
45744 + if (matchpo) {
45745 + if ((matchpo->mode & mode) !=
45746 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
45747 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45748 + __u32 new_mode = mode;
45749 +
45750 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45751 +
45752 + gr_log_learn(new_dentry, mnt, new_mode);
45753 +
45754 + preempt_enable();
45755 + return new_mode;
45756 + }
45757 + preempt_enable();
45758 + return (matchpo->mode & mode);
45759 + }
45760 +
45761 + check_parent:
45762 + curracl = current->acl;
45763 +
45764 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45765 + retval = matchpo->mode & mode;
45766 +
45767 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45768 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45769 + __u32 new_mode = mode;
45770 +
45771 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45772 +
45773 + gr_log_learn(new_dentry, mnt, new_mode);
45774 + preempt_enable();
45775 + return new_mode;
45776 + }
45777 +
45778 + preempt_enable();
45779 + return retval;
45780 +}
45781 +
45782 +int
45783 +gr_check_hidden_task(const struct task_struct *task)
45784 +{
45785 + if (unlikely(!(gr_status & GR_READY)))
45786 + return 0;
45787 +
45788 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
45789 + return 1;
45790 +
45791 + return 0;
45792 +}
45793 +
45794 +int
45795 +gr_check_protected_task(const struct task_struct *task)
45796 +{
45797 + if (unlikely(!(gr_status & GR_READY) || !task))
45798 + return 0;
45799 +
45800 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45801 + task->acl != current->acl)
45802 + return 1;
45803 +
45804 + return 0;
45805 +}
45806 +
45807 +int
45808 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
45809 +{
45810 + struct task_struct *p;
45811 + int ret = 0;
45812 +
45813 + if (unlikely(!(gr_status & GR_READY) || !pid))
45814 + return ret;
45815 +
45816 + read_lock(&tasklist_lock);
45817 + do_each_pid_task(pid, type, p) {
45818 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45819 + p->acl != current->acl) {
45820 + ret = 1;
45821 + goto out;
45822 + }
45823 + } while_each_pid_task(pid, type, p);
45824 +out:
45825 + read_unlock(&tasklist_lock);
45826 +
45827 + return ret;
45828 +}
45829 +
45830 +void
45831 +gr_copy_label(struct task_struct *tsk)
45832 +{
45833 + tsk->signal->used_accept = 0;
45834 + tsk->acl_sp_role = 0;
45835 + tsk->acl_role_id = current->acl_role_id;
45836 + tsk->acl = current->acl;
45837 + tsk->role = current->role;
45838 + tsk->signal->curr_ip = current->signal->curr_ip;
45839 + tsk->signal->saved_ip = current->signal->saved_ip;
45840 + if (current->exec_file)
45841 + get_file(current->exec_file);
45842 + tsk->exec_file = current->exec_file;
45843 + tsk->is_writable = current->is_writable;
45844 + if (unlikely(current->signal->used_accept)) {
45845 + current->signal->curr_ip = 0;
45846 + current->signal->saved_ip = 0;
45847 + }
45848 +
45849 + return;
45850 +}
45851 +
45852 +static void
45853 +gr_set_proc_res(struct task_struct *task)
45854 +{
45855 + struct acl_subject_label *proc;
45856 + unsigned short i;
45857 +
45858 + proc = task->acl;
45859 +
45860 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
45861 + return;
45862 +
45863 + for (i = 0; i < RLIM_NLIMITS; i++) {
45864 + if (!(proc->resmask & (1 << i)))
45865 + continue;
45866 +
45867 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
45868 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
45869 + }
45870 +
45871 + return;
45872 +}
45873 +
45874 +extern int __gr_process_user_ban(struct user_struct *user);
45875 +
45876 +int
45877 +gr_check_user_change(int real, int effective, int fs)
45878 +{
45879 + unsigned int i;
45880 + __u16 num;
45881 + uid_t *uidlist;
45882 + int curuid;
45883 + int realok = 0;
45884 + int effectiveok = 0;
45885 + int fsok = 0;
45886 +
45887 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
45888 + struct user_struct *user;
45889 +
45890 + if (real == -1)
45891 + goto skipit;
45892 +
45893 + user = find_user(real);
45894 + if (user == NULL)
45895 + goto skipit;
45896 +
45897 + if (__gr_process_user_ban(user)) {
45898 + /* for find_user */
45899 + free_uid(user);
45900 + return 1;
45901 + }
45902 +
45903 + /* for find_user */
45904 + free_uid(user);
45905 +
45906 +skipit:
45907 +#endif
45908 +
45909 + if (unlikely(!(gr_status & GR_READY)))
45910 + return 0;
45911 +
45912 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45913 + gr_log_learn_id_change('u', real, effective, fs);
45914 +
45915 + num = current->acl->user_trans_num;
45916 + uidlist = current->acl->user_transitions;
45917 +
45918 + if (uidlist == NULL)
45919 + return 0;
45920 +
45921 + if (real == -1)
45922 + realok = 1;
45923 + if (effective == -1)
45924 + effectiveok = 1;
45925 + if (fs == -1)
45926 + fsok = 1;
45927 +
45928 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
45929 + for (i = 0; i < num; i++) {
45930 + curuid = (int)uidlist[i];
45931 + if (real == curuid)
45932 + realok = 1;
45933 + if (effective == curuid)
45934 + effectiveok = 1;
45935 + if (fs == curuid)
45936 + fsok = 1;
45937 + }
45938 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
45939 + for (i = 0; i < num; i++) {
45940 + curuid = (int)uidlist[i];
45941 + if (real == curuid)
45942 + break;
45943 + if (effective == curuid)
45944 + break;
45945 + if (fs == curuid)
45946 + break;
45947 + }
45948 + /* not in deny list */
45949 + if (i == num) {
45950 + realok = 1;
45951 + effectiveok = 1;
45952 + fsok = 1;
45953 + }
45954 + }
45955 +
45956 + if (realok && effectiveok && fsok)
45957 + return 0;
45958 + else {
45959 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
45960 + return 1;
45961 + }
45962 +}
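+/* gr_check_user_change() above (and gr_check_group_change() below) enforce
+   the subject's id transition lists: with GR_ID_ALLOW every requested id must
+   appear in the list, with GR_ID_DENY the change is rejected if any requested
+   id appears, and an id of -1 (meaning "unchanged") is always acceptable.
+   A return of 1 flags the change as denied and logs it
+*/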
45963 +
45964 +int
45965 +gr_check_group_change(int real, int effective, int fs)
45966 +{
45967 + unsigned int i;
45968 + __u16 num;
45969 + gid_t *gidlist;
45970 + int curgid;
45971 + int realok = 0;
45972 + int effectiveok = 0;
45973 + int fsok = 0;
45974 +
45975 + if (unlikely(!(gr_status & GR_READY)))
45976 + return 0;
45977 +
45978 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45979 + gr_log_learn_id_change('g', real, effective, fs);
45980 +
45981 + num = current->acl->group_trans_num;
45982 + gidlist = current->acl->group_transitions;
45983 +
45984 + if (gidlist == NULL)
45985 + return 0;
45986 +
45987 + if (real == -1)
45988 + realok = 1;
45989 + if (effective == -1)
45990 + effectiveok = 1;
45991 + if (fs == -1)
45992 + fsok = 1;
45993 +
45994 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
45995 + for (i = 0; i < num; i++) {
45996 + curgid = (int)gidlist[i];
45997 + if (real == curgid)
45998 + realok = 1;
45999 + if (effective == curgid)
46000 + effectiveok = 1;
46001 + if (fs == curgid)
46002 + fsok = 1;
46003 + }
46004 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46005 + for (i = 0; i < num; i++) {
46006 + curgid = (int)gidlist[i];
46007 + if (real == curgid)
46008 + break;
46009 + if (effective == curgid)
46010 + break;
46011 + if (fs == curgid)
46012 + break;
46013 + }
46014 + /* not in deny list */
46015 + if (i == num) {
46016 + realok = 1;
46017 + effectiveok = 1;
46018 + fsok = 1;
46019 + }
46020 + }
46021 +
46022 + if (realok && effectiveok && fsok)
46023 + return 0;
46024 + else {
46025 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46026 + return 1;
46027 + }
46028 +}
46029 +
46030 +void
46031 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46032 +{
46033 + struct acl_role_label *role = task->role;
46034 + struct acl_subject_label *subj = NULL;
46035 + struct acl_object_label *obj;
46036 + struct file *filp;
46037 +
46038 + if (unlikely(!(gr_status & GR_READY)))
46039 + return;
46040 +
46041 + filp = task->exec_file;
46042 +
46043 + /* kernel process, we'll give them the kernel role */
46044 + if (unlikely(!filp)) {
46045 + task->role = kernel_role;
46046 + task->acl = kernel_role->root_label;
46047 + return;
46048 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46049 + role = lookup_acl_role_label(task, uid, gid);
46050 +
46051 + /* perform subject lookup in possibly new role
46052 + we can use this result below in the case where role == task->role
46053 + */
46054 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46055 +
46056 +	/* if we changed uid/gid but ended up in the same role and are
46057 +	   using inheritance, don't lose the inherited subject: when the
46058 +	   current subject differs from what a normal lookup would
46059 +	   return, we arrived at it via inheritance, so don't lose
46060 +	   that subject
46061 +	*/
46062 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46063 + (subj == task->acl)))
46064 + task->acl = subj;
46065 +
46066 + task->role = role;
46067 +
46068 + task->is_writable = 0;
46069 +
46070 + /* ignore additional mmap checks for processes that are writable
46071 + by the default ACL */
46072 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46073 + if (unlikely(obj->mode & GR_WRITE))
46074 + task->is_writable = 1;
46075 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46076 + if (unlikely(obj->mode & GR_WRITE))
46077 + task->is_writable = 1;
46078 +
46079 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46080 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46081 +#endif
46082 +
46083 + gr_set_proc_res(task);
46084 +
46085 + return;
46086 +}
46087 +
46088 +int
46089 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46090 + const int unsafe_share)
46091 +{
46092 + struct task_struct *task = current;
46093 + struct acl_subject_label *newacl;
46094 + struct acl_object_label *obj;
46095 + __u32 retmode;
46096 +
46097 + if (unlikely(!(gr_status & GR_READY)))
46098 + return 0;
46099 +
46100 + newacl = chk_subj_label(dentry, mnt, task->role);
46101 +
46102 + task_lock(task);
46103 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46104 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46105 + !(task->role->roletype & GR_ROLE_GOD) &&
46106 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46107 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46108 + task_unlock(task);
46109 + if (unsafe_share)
46110 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46111 + else
46112 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46113 + return -EACCES;
46114 + }
46115 + task_unlock(task);
46116 +
46117 + obj = chk_obj_label(dentry, mnt, task->acl);
46118 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46119 +
46120 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46121 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46122 + if (obj->nested)
46123 + task->acl = obj->nested;
46124 + else
46125 + task->acl = newacl;
46126 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46127 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46128 +
46129 + task->is_writable = 0;
46130 +
46131 + /* ignore additional mmap checks for processes that are writable
46132 + by the default ACL */
46133 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46134 + if (unlikely(obj->mode & GR_WRITE))
46135 + task->is_writable = 1;
46136 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46137 + if (unlikely(obj->mode & GR_WRITE))
46138 + task->is_writable = 1;
46139 +
46140 + gr_set_proc_res(task);
46141 +
46142 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46143 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46144 +#endif
46145 + return 0;
46146 +}
46147 +
46148 +/* always called with valid inodev ptr */
46149 +static void
46150 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46151 +{
46152 + struct acl_object_label *matchpo;
46153 + struct acl_subject_label *matchps;
46154 + struct acl_subject_label *subj;
46155 + struct acl_role_label *role;
46156 + unsigned int x;
46157 +
46158 + FOR_EACH_ROLE_START(role)
46159 + FOR_EACH_SUBJECT_START(role, subj, x)
46160 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46161 + matchpo->mode |= GR_DELETED;
46162 + FOR_EACH_SUBJECT_END(subj,x)
46163 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46164 + if (subj->inode == ino && subj->device == dev)
46165 + subj->mode |= GR_DELETED;
46166 + FOR_EACH_NESTED_SUBJECT_END(subj)
46167 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46168 + matchps->mode |= GR_DELETED;
46169 + FOR_EACH_ROLE_END(role)
46170 +
46171 + inodev->nentry->deleted = 1;
46172 +
46173 + return;
46174 +}
46175 +
46176 +void
46177 +gr_handle_delete(const ino_t ino, const dev_t dev)
46178 +{
46179 + struct inodev_entry *inodev;
46180 +
46181 + if (unlikely(!(gr_status & GR_READY)))
46182 + return;
46183 +
46184 + write_lock(&gr_inode_lock);
46185 + inodev = lookup_inodev_entry(ino, dev);
46186 + if (inodev != NULL)
46187 + do_handle_delete(inodev, ino, dev);
46188 + write_unlock(&gr_inode_lock);
46189 +
46190 + return;
46191 +}
46192 +
46193 +static void
46194 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46195 + const ino_t newinode, const dev_t newdevice,
46196 + struct acl_subject_label *subj)
46197 +{
46198 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46199 + struct acl_object_label *match;
46200 +
46201 + match = subj->obj_hash[index];
46202 +
46203 + while (match && (match->inode != oldinode ||
46204 + match->device != olddevice ||
46205 + !(match->mode & GR_DELETED)))
46206 + match = match->next;
46207 +
46208 + if (match && (match->inode == oldinode)
46209 + && (match->device == olddevice)
46210 + && (match->mode & GR_DELETED)) {
46211 + if (match->prev == NULL) {
46212 + subj->obj_hash[index] = match->next;
46213 + if (match->next != NULL)
46214 + match->next->prev = NULL;
46215 + } else {
46216 + match->prev->next = match->next;
46217 + if (match->next != NULL)
46218 + match->next->prev = match->prev;
46219 + }
46220 + match->prev = NULL;
46221 + match->next = NULL;
46222 + match->inode = newinode;
46223 + match->device = newdevice;
46224 + match->mode &= ~GR_DELETED;
46225 +
46226 + insert_acl_obj_label(match, subj);
46227 + }
46228 +
46229 + return;
46230 +}
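+/* update_acl_obj_label() above (and its subject/inodev counterparts below)
+   reattach a path's label when the file is recreated or renamed: an entry
+   still flagged GR_DELETED for the old inode/device is unhooked from its
+   hash chain, given the new inode/device, cleared of GR_DELETED, and
+   reinserted so it lands in the correct bucket for the new key
+*/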
46231 +
46232 +static void
46233 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46234 + const ino_t newinode, const dev_t newdevice,
46235 + struct acl_role_label *role)
46236 +{
46237 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46238 + struct acl_subject_label *match;
46239 +
46240 + match = role->subj_hash[index];
46241 +
46242 + while (match && (match->inode != oldinode ||
46243 + match->device != olddevice ||
46244 + !(match->mode & GR_DELETED)))
46245 + match = match->next;
46246 +
46247 + if (match && (match->inode == oldinode)
46248 + && (match->device == olddevice)
46249 + && (match->mode & GR_DELETED)) {
46250 + if (match->prev == NULL) {
46251 + role->subj_hash[index] = match->next;
46252 + if (match->next != NULL)
46253 + match->next->prev = NULL;
46254 + } else {
46255 + match->prev->next = match->next;
46256 + if (match->next != NULL)
46257 + match->next->prev = match->prev;
46258 + }
46259 + match->prev = NULL;
46260 + match->next = NULL;
46261 + match->inode = newinode;
46262 + match->device = newdevice;
46263 + match->mode &= ~GR_DELETED;
46264 +
46265 + insert_acl_subj_label(match, role);
46266 + }
46267 +
46268 + return;
46269 +}
46270 +
46271 +static void
46272 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46273 + const ino_t newinode, const dev_t newdevice)
46274 +{
46275 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46276 + struct inodev_entry *match;
46277 +
46278 + match = inodev_set.i_hash[index];
46279 +
46280 + while (match && (match->nentry->inode != oldinode ||
46281 + match->nentry->device != olddevice || !match->nentry->deleted))
46282 + match = match->next;
46283 +
46284 + if (match && (match->nentry->inode == oldinode)
46285 + && (match->nentry->device == olddevice) &&
46286 + match->nentry->deleted) {
46287 + if (match->prev == NULL) {
46288 + inodev_set.i_hash[index] = match->next;
46289 + if (match->next != NULL)
46290 + match->next->prev = NULL;
46291 + } else {
46292 + match->prev->next = match->next;
46293 + if (match->next != NULL)
46294 + match->next->prev = match->prev;
46295 + }
46296 + match->prev = NULL;
46297 + match->next = NULL;
46298 + match->nentry->inode = newinode;
46299 + match->nentry->device = newdevice;
46300 + match->nentry->deleted = 0;
46301 +
46302 + insert_inodev_entry(match);
46303 + }
46304 +
46305 + return;
46306 +}
46307 +
46308 +static void
46309 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46310 + const struct vfsmount *mnt)
46311 +{
46312 + struct acl_subject_label *subj;
46313 + struct acl_role_label *role;
46314 + unsigned int x;
46315 + ino_t inode = dentry->d_inode->i_ino;
46316 + dev_t dev = __get_dev(dentry);
46317 +
46318 + FOR_EACH_ROLE_START(role)
46319 + update_acl_subj_label(matchn->inode, matchn->device,
46320 + inode, dev, role);
46321 +
46322 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46323 + if ((subj->inode == inode) && (subj->device == dev)) {
46324 + subj->inode = inode;
46325 + subj->device = dev;
46326 + }
46327 + FOR_EACH_NESTED_SUBJECT_END(subj)
46328 + FOR_EACH_SUBJECT_START(role, subj, x)
46329 + update_acl_obj_label(matchn->inode, matchn->device,
46330 + inode, dev, subj);
46331 + FOR_EACH_SUBJECT_END(subj,x)
46332 + FOR_EACH_ROLE_END(role)
46333 +
46334 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46335 +
46336 + return;
46337 +}
46338 +
46339 +void
46340 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46341 +{
46342 + struct name_entry *matchn;
46343 +
46344 + if (unlikely(!(gr_status & GR_READY)))
46345 + return;
46346 +
46347 + preempt_disable();
46348 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46349 +
46350 + if (unlikely((unsigned long)matchn)) {
46351 + write_lock(&gr_inode_lock);
46352 + do_handle_create(matchn, dentry, mnt);
46353 + write_unlock(&gr_inode_lock);
46354 + }
46355 + preempt_enable();
46356 +
46357 + return;
46358 +}
46359 +
46360 +void
46361 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46362 + struct dentry *old_dentry,
46363 + struct dentry *new_dentry,
46364 + struct vfsmount *mnt, const __u8 replace)
46365 +{
46366 + struct name_entry *matchn;
46367 + struct inodev_entry *inodev;
46368 + ino_t oldinode = old_dentry->d_inode->i_ino;
46369 + dev_t olddev = __get_dev(old_dentry);
46370 +
46371 +	/* vfs_rename swaps the name and parent link for old_dentry and
46372 +	   new_dentry.
46373 +	   at this point, old_dentry has the new name, parent link, and inode
46374 +	   for the renamed file.
46375 +	   if a file is being replaced by the rename, new_dentry has the inode
46376 +	   and name of the replaced file
46377 +	*/
46378 +
46379 + if (unlikely(!(gr_status & GR_READY)))
46380 + return;
46381 +
46382 + preempt_disable();
46383 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46384 +
46385 + /* we wouldn't have to check d_inode if it weren't for
46386 + NFS silly-renaming
46387 + */
46388 +
46389 + write_lock(&gr_inode_lock);
46390 + if (unlikely(replace && new_dentry->d_inode)) {
46391 + ino_t newinode = new_dentry->d_inode->i_ino;
46392 + dev_t newdev = __get_dev(new_dentry);
46393 + inodev = lookup_inodev_entry(newinode, newdev);
46394 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46395 + do_handle_delete(inodev, newinode, newdev);
46396 + }
46397 +
46398 + inodev = lookup_inodev_entry(oldinode, olddev);
46399 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46400 + do_handle_delete(inodev, oldinode, olddev);
46401 +
46402 + if (unlikely((unsigned long)matchn))
46403 + do_handle_create(matchn, old_dentry, mnt);
46404 +
46405 + write_unlock(&gr_inode_lock);
46406 + preempt_enable();
46407 +
46408 + return;
46409 +}
46410 +
46411 +static int
46412 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46413 + unsigned char **sum)
46414 +{
46415 + struct acl_role_label *r;
46416 + struct role_allowed_ip *ipp;
46417 + struct role_transition *trans;
46418 + unsigned int i;
46419 + int found = 0;
46420 + u32 curr_ip = current->signal->curr_ip;
46421 +
46422 + current->signal->saved_ip = curr_ip;
46423 +
46424 + /* check transition table */
46425 +
46426 + for (trans = current->role->transitions; trans; trans = trans->next) {
46427 + if (!strcmp(rolename, trans->rolename)) {
46428 + found = 1;
46429 + break;
46430 + }
46431 + }
46432 +
46433 + if (!found)
46434 + return 0;
46435 +
46436 + /* handle special roles that do not require authentication
46437 + and check ip */
46438 +
46439 + FOR_EACH_ROLE_START(r)
46440 + if (!strcmp(rolename, r->rolename) &&
46441 + (r->roletype & GR_ROLE_SPECIAL)) {
46442 + found = 0;
46443 + if (r->allowed_ips != NULL) {
46444 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46445 + if ((ntohl(curr_ip) & ipp->netmask) ==
46446 + (ntohl(ipp->addr) & ipp->netmask))
46447 + found = 1;
46448 + }
46449 + } else
46450 + found = 2;
46451 + if (!found)
46452 + return 0;
46453 +
46454 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46455 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46456 + *salt = NULL;
46457 + *sum = NULL;
46458 + return 1;
46459 + }
46460 + }
46461 + FOR_EACH_ROLE_END(r)
46462 +
46463 + for (i = 0; i < num_sprole_pws; i++) {
46464 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46465 + *salt = acl_special_roles[i]->salt;
46466 + *sum = acl_special_roles[i]->sum;
46467 + return 1;
46468 + }
46469 + }
46470 +
46471 + return 0;
46472 +}
46473 +
46474 +static void
46475 +assign_special_role(char *rolename)
46476 +{
46477 + struct acl_object_label *obj;
46478 + struct acl_role_label *r;
46479 + struct acl_role_label *assigned = NULL;
46480 + struct task_struct *tsk;
46481 + struct file *filp;
46482 +
46483 + FOR_EACH_ROLE_START(r)
46484 + if (!strcmp(rolename, r->rolename) &&
46485 + (r->roletype & GR_ROLE_SPECIAL)) {
46486 + assigned = r;
46487 + break;
46488 + }
46489 + FOR_EACH_ROLE_END(r)
46490 +
46491 + if (!assigned)
46492 + return;
46493 +
46494 + read_lock(&tasklist_lock);
46495 + read_lock(&grsec_exec_file_lock);
46496 +
46497 + tsk = current->real_parent;
46498 + if (tsk == NULL)
46499 + goto out_unlock;
46500 +
46501 + filp = tsk->exec_file;
46502 + if (filp == NULL)
46503 + goto out_unlock;
46504 +
46505 + tsk->is_writable = 0;
46506 +
46507 + tsk->acl_sp_role = 1;
46508 + tsk->acl_role_id = ++acl_sp_role_value;
46509 + tsk->role = assigned;
46510 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46511 +
46512 + /* ignore additional mmap checks for processes that are writable
46513 + by the default ACL */
46514 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46515 + if (unlikely(obj->mode & GR_WRITE))
46516 + tsk->is_writable = 1;
46517 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46518 + if (unlikely(obj->mode & GR_WRITE))
46519 + tsk->is_writable = 1;
46520 +
46521 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46522 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46523 +#endif
46524 +
46525 +out_unlock:
46526 + read_unlock(&grsec_exec_file_lock);
46527 + read_unlock(&tasklist_lock);
46528 + return;
46529 +}
46530 +
46531 +int gr_check_secure_terminal(struct task_struct *task)
46532 +{
46533 + struct task_struct *p, *p2, *p3;
46534 + struct files_struct *files;
46535 + struct fdtable *fdt;
46536 + struct file *our_file = NULL, *file;
46537 + int i;
46538 +
46539 + if (task->signal->tty == NULL)
46540 + return 1;
46541 +
46542 + files = get_files_struct(task);
46543 + if (files != NULL) {
46544 + rcu_read_lock();
46545 + fdt = files_fdtable(files);
46546 + for (i=0; i < fdt->max_fds; i++) {
46547 + file = fcheck_files(files, i);
46548 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46549 + get_file(file);
46550 + our_file = file;
46551 + }
46552 + }
46553 + rcu_read_unlock();
46554 + put_files_struct(files);
46555 + }
46556 +
46557 + if (our_file == NULL)
46558 + return 1;
46559 +
46560 + read_lock(&tasklist_lock);
46561 + do_each_thread(p2, p) {
46562 + files = get_files_struct(p);
46563 + if (files == NULL ||
46564 + (p->signal && p->signal->tty == task->signal->tty)) {
46565 + if (files != NULL)
46566 + put_files_struct(files);
46567 + continue;
46568 + }
46569 + rcu_read_lock();
46570 + fdt = files_fdtable(files);
46571 + for (i=0; i < fdt->max_fds; i++) {
46572 + file = fcheck_files(files, i);
46573 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46574 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46575 + p3 = task;
46576 + while (p3->pid > 0) {
46577 + if (p3 == p)
46578 + break;
46579 + p3 = p3->real_parent;
46580 + }
46581 + if (p3 == p)
46582 + break;
46583 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46584 + gr_handle_alertkill(p);
46585 + rcu_read_unlock();
46586 + put_files_struct(files);
46587 + read_unlock(&tasklist_lock);
46588 + fput(our_file);
46589 + return 0;
46590 + }
46591 + }
46592 + rcu_read_unlock();
46593 + put_files_struct(files);
46594 + } while_each_thread(p2, p);
46595 + read_unlock(&tasklist_lock);
46596 +
46597 + fput(our_file);
46598 + return 1;
46599 +}
46600 +
46601 +ssize_t
46602 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46603 +{
46604 + struct gr_arg_wrapper uwrap;
46605 + unsigned char *sprole_salt = NULL;
46606 + unsigned char *sprole_sum = NULL;
46607 + int error = sizeof (struct gr_arg_wrapper);
46608 + int error2 = 0;
46609 +
46610 + mutex_lock(&gr_dev_mutex);
46611 +
46612 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46613 + error = -EPERM;
46614 + goto out;
46615 + }
46616 +
46617 + if (count != sizeof (struct gr_arg_wrapper)) {
46618 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46619 + error = -EINVAL;
46620 + goto out;
46621 + }
46622 +
46623 +
46624 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46625 + gr_auth_expires = 0;
46626 + gr_auth_attempts = 0;
46627 + }
46628 +
46629 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46630 + error = -EFAULT;
46631 + goto out;
46632 + }
46633 +
46634 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46635 + error = -EINVAL;
46636 + goto out;
46637 + }
46638 +
46639 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46640 + error = -EFAULT;
46641 + goto out;
46642 + }
46643 +
46644 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46645 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46646 + time_after(gr_auth_expires, get_seconds())) {
46647 + error = -EBUSY;
46648 + goto out;
46649 + }
46650 +
46651 + /* if a non-root user is trying to do anything other than use a special role,
46652 + do not attempt authentication and do not count it towards authentication
46653 + locking
46654 + */
46655 +
46656 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46657 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46658 + current_uid()) {
46659 + error = -EPERM;
46660 + goto out;
46661 + }
46662 +
46663 + /* ensure pw and special role name are null terminated */
46664 +
46665 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46666 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46667 +
46668 + /* Okay.
46669 + * We have enough of the argument structure (we have yet
46670 + * to copy_from_user the tables themselves). Copy the tables
46671 + * only if we need them, i.e. for loading operations. */
46672 +
46673 + switch (gr_usermode->mode) {
46674 + case GR_STATUS:
46675 + if (gr_status & GR_READY) {
46676 + error = 1;
46677 + if (!gr_check_secure_terminal(current))
46678 + error = 3;
46679 + } else
46680 + error = 2;
46681 + goto out;
46682 + case GR_SHUTDOWN:
46683 + if ((gr_status & GR_READY)
46684 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46685 + pax_open_kernel();
46686 + gr_status &= ~GR_READY;
46687 + pax_close_kernel();
46688 +
46689 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46690 + free_variables();
46691 + memset(gr_usermode, 0, sizeof (struct gr_arg));
46692 + memset(gr_system_salt, 0, GR_SALT_LEN);
46693 + memset(gr_system_sum, 0, GR_SHA_LEN);
46694 + } else if (gr_status & GR_READY) {
46695 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46696 + error = -EPERM;
46697 + } else {
46698 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46699 + error = -EAGAIN;
46700 + }
46701 + break;
46702 + case GR_ENABLE:
46703 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46704 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46705 + else {
46706 + if (gr_status & GR_READY)
46707 + error = -EAGAIN;
46708 + else
46709 + error = error2;
46710 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46711 + }
46712 + break;
46713 + case GR_RELOAD:
46714 + if (!(gr_status & GR_READY)) {
46715 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46716 + error = -EAGAIN;
46717 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46718 + lock_kernel();
46719 +
46720 + pax_open_kernel();
46721 + gr_status &= ~GR_READY;
46722 + pax_close_kernel();
46723 +
46724 + free_variables();
46725 + if (!(error2 = gracl_init(gr_usermode))) {
46726 + unlock_kernel();
46727 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46728 + } else {
46729 + unlock_kernel();
46730 + error = error2;
46731 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46732 + }
46733 + } else {
46734 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46735 + error = -EPERM;
46736 + }
46737 + break;
46738 + case GR_SEGVMOD:
46739 + if (unlikely(!(gr_status & GR_READY))) {
46740 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46741 + error = -EAGAIN;
46742 + break;
46743 + }
46744 +
46745 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46746 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46747 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46748 + struct acl_subject_label *segvacl;
46749 + segvacl =
46750 + lookup_acl_subj_label(gr_usermode->segv_inode,
46751 + gr_usermode->segv_device,
46752 + current->role);
46753 + if (segvacl) {
46754 + segvacl->crashes = 0;
46755 + segvacl->expires = 0;
46756 + }
46757 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46758 + gr_remove_uid(gr_usermode->segv_uid);
46759 + }
46760 + } else {
46761 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46762 + error = -EPERM;
46763 + }
46764 + break;
46765 + case GR_SPROLE:
46766 + case GR_SPROLEPAM:
46767 + if (unlikely(!(gr_status & GR_READY))) {
46768 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46769 + error = -EAGAIN;
46770 + break;
46771 + }
46772 +
46773 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46774 + current->role->expires = 0;
46775 + current->role->auth_attempts = 0;
46776 + }
46777 +
46778 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46779 + time_after(current->role->expires, get_seconds())) {
46780 + error = -EBUSY;
46781 + goto out;
46782 + }
46783 +
46784 + if (lookup_special_role_auth
46785 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46786 + && ((!sprole_salt && !sprole_sum)
46787 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
46788 + char *p = "";
46789 + assign_special_role(gr_usermode->sp_role);
46790 + read_lock(&tasklist_lock);
46791 + if (current->real_parent)
46792 + p = current->real_parent->role->rolename;
46793 + read_unlock(&tasklist_lock);
46794 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
46795 + p, acl_sp_role_value);
46796 + } else {
46797 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
46798 + error = -EPERM;
46799 + if(!(current->role->auth_attempts++))
46800 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46801 +
46802 + goto out;
46803 + }
46804 + break;
46805 + case GR_UNSPROLE:
46806 + if (unlikely(!(gr_status & GR_READY))) {
46807 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
46808 + error = -EAGAIN;
46809 + break;
46810 + }
46811 +
46812 + if (current->role->roletype & GR_ROLE_SPECIAL) {
46813 + char *p = "";
46814 + int i = 0;
46815 +
46816 + read_lock(&tasklist_lock);
46817 + if (current->real_parent) {
46818 + p = current->real_parent->role->rolename;
46819 + i = current->real_parent->acl_role_id;
46820 + }
46821 + read_unlock(&tasklist_lock);
46822 +
46823 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
46824 + gr_set_acls(1);
46825 + } else {
46826 + error = -EPERM;
46827 + goto out;
46828 + }
46829 + break;
46830 + default:
46831 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
46832 + error = -EINVAL;
46833 + break;
46834 + }
46835 +
46836 + if (error != -EPERM)
46837 + goto out;
46838 +
46839 + if(!(gr_auth_attempts++))
46840 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46841 +
46842 + out:
46843 + mutex_unlock(&gr_dev_mutex);
46844 + return error;
46845 +}
46846 +
46847 +/* must be called with
46848 + rcu_read_lock();
46849 + read_lock(&tasklist_lock);
46850 + read_lock(&grsec_exec_file_lock);
46851 +*/
46852 +int gr_apply_subject_to_task(struct task_struct *task)
46853 +{
46854 + struct acl_object_label *obj;
46855 + char *tmpname;
46856 + struct acl_subject_label *tmpsubj;
46857 + struct file *filp;
46858 + struct name_entry *nmatch;
46859 +
46860 + filp = task->exec_file;
46861 + if (filp == NULL)
46862 + return 0;
46863 +
46864 + /* the following applies the correct subject
46865 + to binaries that were already running when
46866 + the RBAC system was enabled and that have been
46867 + replaced or deleted since their execution
46868 + -----
46869 + when the RBAC system starts, the inode/dev
46870 + from exec_file will be one that the RBAC system
46871 + is unaware of. It only knows the inode/dev
46872 + of the file currently on disk, or the absence
46873 + of it.
46874 + */
46875 + preempt_disable();
46876 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
46877 +
46878 + nmatch = lookup_name_entry(tmpname);
46879 + preempt_enable();
46880 + tmpsubj = NULL;
46881 + if (nmatch) {
46882 + if (nmatch->deleted)
46883 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
46884 + else
46885 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
46886 + if (tmpsubj != NULL)
46887 + task->acl = tmpsubj;
46888 + }
46889 + if (tmpsubj == NULL)
46890 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
46891 + task->role);
46892 + if (task->acl) {
46893 + struct acl_subject_label *curr;
46894 + curr = task->acl;
46895 +
46896 + task->is_writable = 0;
46897 + /* ignore additional mmap checks for processes that are writable
46898 + by the default ACL */
46899 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46900 + if (unlikely(obj->mode & GR_WRITE))
46901 + task->is_writable = 1;
46902 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46903 + if (unlikely(obj->mode & GR_WRITE))
46904 + task->is_writable = 1;
46905 +
46906 + gr_set_proc_res(task);
46907 +
46908 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46909 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46910 +#endif
46911 + } else {
46912 + return 1;
46913 + }
46914 +
46915 + return 0;
46916 +}
46917 +
46918 +int
46919 +gr_set_acls(const int type)
46920 +{
46921 + struct task_struct *task, *task2;
46922 + struct acl_role_label *role = current->role;
46923 + __u16 acl_role_id = current->acl_role_id;
46924 + const struct cred *cred;
46925 + int ret;
46926 +
46927 + rcu_read_lock();
46928 + read_lock(&tasklist_lock);
46929 + read_lock(&grsec_exec_file_lock);
46930 + do_each_thread(task2, task) {
46931 + /* check to see if we're called from the exit handler,
46932 + if so, only replace ACLs that have inherited the admin
46933 + ACL */
46934 +
46935 + if (type && (task->role != role ||
46936 + task->acl_role_id != acl_role_id))
46937 + continue;
46938 +
46939 + task->acl_role_id = 0;
46940 + task->acl_sp_role = 0;
46941 +
46942 + if (task->exec_file) {
46943 + cred = __task_cred(task);
46944 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
46945 +
46946 + ret = gr_apply_subject_to_task(task);
46947 + if (ret) {
46948 + read_unlock(&grsec_exec_file_lock);
46949 + read_unlock(&tasklist_lock);
46950 + rcu_read_unlock();
46951 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
46952 + return ret;
46953 + }
46954 + } else {
46955 + // it's a kernel process
46956 + task->role = kernel_role;
46957 + task->acl = kernel_role->root_label;
46958 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
46959 + task->acl->mode &= ~GR_PROCFIND;
46960 +#endif
46961 + }
46962 + } while_each_thread(task2, task);
46963 + read_unlock(&grsec_exec_file_lock);
46964 + read_unlock(&tasklist_lock);
46965 + rcu_read_unlock();
46966 +
46967 + return 0;
46968 +}
46969 +
46970 +void
46971 +gr_learn_resource(const struct task_struct *task,
46972 + const int res, const unsigned long wanted, const int gt)
46973 +{
46974 + struct acl_subject_label *acl;
46975 + const struct cred *cred;
46976 +
46977 + if (unlikely((gr_status & GR_READY) &&
46978 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
46979 + goto skip_reslog;
46980 +
46981 +#ifdef CONFIG_GRKERNSEC_RESLOG
46982 + gr_log_resource(task, res, wanted, gt);
46983 +#endif
46984 + skip_reslog:
46985 +
46986 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
46987 + return;
46988 +
46989 + acl = task->acl;
46990 +
46991 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
46992 + !(acl->resmask & (1 << (unsigned short) res))))
46993 + return;
46994 +
46995 + if (wanted >= acl->res[res].rlim_cur) {
46996 + unsigned long res_add;
46997 +
46998 + res_add = wanted;
46999 + switch (res) {
47000 + case RLIMIT_CPU:
47001 + res_add += GR_RLIM_CPU_BUMP;
47002 + break;
47003 + case RLIMIT_FSIZE:
47004 + res_add += GR_RLIM_FSIZE_BUMP;
47005 + break;
47006 + case RLIMIT_DATA:
47007 + res_add += GR_RLIM_DATA_BUMP;
47008 + break;
47009 + case RLIMIT_STACK:
47010 + res_add += GR_RLIM_STACK_BUMP;
47011 + break;
47012 + case RLIMIT_CORE:
47013 + res_add += GR_RLIM_CORE_BUMP;
47014 + break;
47015 + case RLIMIT_RSS:
47016 + res_add += GR_RLIM_RSS_BUMP;
47017 + break;
47018 + case RLIMIT_NPROC:
47019 + res_add += GR_RLIM_NPROC_BUMP;
47020 + break;
47021 + case RLIMIT_NOFILE:
47022 + res_add += GR_RLIM_NOFILE_BUMP;
47023 + break;
47024 + case RLIMIT_MEMLOCK:
47025 + res_add += GR_RLIM_MEMLOCK_BUMP;
47026 + break;
47027 + case RLIMIT_AS:
47028 + res_add += GR_RLIM_AS_BUMP;
47029 + break;
47030 + case RLIMIT_LOCKS:
47031 + res_add += GR_RLIM_LOCKS_BUMP;
47032 + break;
47033 + case RLIMIT_SIGPENDING:
47034 + res_add += GR_RLIM_SIGPENDING_BUMP;
47035 + break;
47036 + case RLIMIT_MSGQUEUE:
47037 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47038 + break;
47039 + case RLIMIT_NICE:
47040 + res_add += GR_RLIM_NICE_BUMP;
47041 + break;
47042 + case RLIMIT_RTPRIO:
47043 + res_add += GR_RLIM_RTPRIO_BUMP;
47044 + break;
47045 + case RLIMIT_RTTIME:
47046 + res_add += GR_RLIM_RTTIME_BUMP;
47047 + break;
47048 + }
47049 +
47050 + acl->res[res].rlim_cur = res_add;
47051 +
47052 + if (wanted > acl->res[res].rlim_max)
47053 + acl->res[res].rlim_max = res_add;
47054 +
47055 + /* only log the subject filename, since resource logging is supported for
47056 + single-subject learning only */
47057 + rcu_read_lock();
47058 + cred = __task_cred(task);
47059 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47060 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47061 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47062 + "", (unsigned long) res, &task->signal->saved_ip);
47063 + rcu_read_unlock();
47064 + }
47065 +
47066 + return;
47067 +}
47068 +
47069 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47070 +void
47071 +pax_set_initial_flags(struct linux_binprm *bprm)
47072 +{
47073 + struct task_struct *task = current;
47074 + struct acl_subject_label *proc;
47075 + unsigned long flags;
47076 +
47077 + if (unlikely(!(gr_status & GR_READY)))
47078 + return;
47079 +
47080 + flags = pax_get_flags(task);
47081 +
47082 + proc = task->acl;
47083 +
47084 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47085 + flags &= ~MF_PAX_PAGEEXEC;
47086 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47087 + flags &= ~MF_PAX_SEGMEXEC;
47088 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47089 + flags &= ~MF_PAX_RANDMMAP;
47090 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47091 + flags &= ~MF_PAX_EMUTRAMP;
47092 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47093 + flags &= ~MF_PAX_MPROTECT;
47094 +
47095 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47096 + flags |= MF_PAX_PAGEEXEC;
47097 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47098 + flags |= MF_PAX_SEGMEXEC;
47099 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47100 + flags |= MF_PAX_RANDMMAP;
47101 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47102 + flags |= MF_PAX_EMUTRAMP;
47103 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47104 + flags |= MF_PAX_MPROTECT;
47105 +
47106 + pax_set_flags(task, flags);
47107 +
47108 + return;
47109 +}
47110 +#endif
47111 +
47112 +#ifdef CONFIG_SYSCTL
47113 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47114 + system to save 35kb of memory */
47115 +
47116 +/* we modify the passed in filename, but adjust it back before returning */
47117 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47118 +{
47119 + struct name_entry *nmatch;
47120 + char *p, *lastp = NULL;
47121 + struct acl_object_label *obj = NULL, *tmp;
47122 + struct acl_subject_label *tmpsubj;
47123 + char c = '\0';
47124 +
47125 + read_lock(&gr_inode_lock);
47126 +
47127 + p = name + len - 1;
47128 + do {
47129 + nmatch = lookup_name_entry(name);
47130 + if (lastp != NULL)
47131 + *lastp = c;
47132 +
47133 + if (nmatch == NULL)
47134 + goto next_component;
47135 + tmpsubj = current->acl;
47136 + do {
47137 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47138 + if (obj != NULL) {
47139 + tmp = obj->globbed;
47140 + while (tmp) {
47141 + if (!glob_match(tmp->filename, name)) {
47142 + obj = tmp;
47143 + goto found_obj;
47144 + }
47145 + tmp = tmp->next;
47146 + }
47147 + goto found_obj;
47148 + }
47149 + } while ((tmpsubj = tmpsubj->parent_subject));
47150 +next_component:
47151 + /* end case */
47152 + if (p == name)
47153 + break;
47154 +
47155 + while (*p != '/')
47156 + p--;
47157 + if (p == name)
47158 + lastp = p + 1;
47159 + else {
47160 + lastp = p;
47161 + p--;
47162 + }
47163 + c = *lastp;
47164 + *lastp = '\0';
47165 + } while (1);
47166 +found_obj:
47167 + read_unlock(&gr_inode_lock);
47168 + /* obj returned will always be non-null */
47169 + return obj;
47170 +}
47171 +
47172 +/* returns 0 when allowing, non-zero on error
47173 + op of 0 is used for readdir, so we don't log the names of hidden files
47174 +*/
47175 +__u32
47176 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47177 +{
47178 + ctl_table *tmp;
47179 + const char *proc_sys = "/proc/sys";
47180 + char *path;
47181 + struct acl_object_label *obj;
47182 + unsigned short len = 0, pos = 0, depth = 0, i;
47183 + __u32 err = 0;
47184 + __u32 mode = 0;
47185 +
47186 + if (unlikely(!(gr_status & GR_READY)))
47187 + return 0;
47188 +
47189 + /* for now, ignore operations on non-sysctl entries if it's not a
47190 + readdir*/
47191 + if (table->child != NULL && op != 0)
47192 + return 0;
47193 +
47194 + mode |= GR_FIND;
47195 + /* it's only a read if it's an entry, read on dirs is for readdir */
47196 + if (op & MAY_READ)
47197 + mode |= GR_READ;
47198 + if (op & MAY_WRITE)
47199 + mode |= GR_WRITE;
47200 +
47201 + preempt_disable();
47202 +
47203 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47204 +
47205 + /* it's only a read/write if it's an actual entry, not a dir
47206 + (which are opened for readdir)
47207 + */
47208 +
47209 + /* convert the requested sysctl entry into a pathname */
47210 +
47211 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47212 + len += strlen(tmp->procname);
47213 + len++;
47214 + depth++;
47215 + }
47216 +
47217 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47218 + /* deny */
47219 + goto out;
47220 + }
47221 +
47222 + memset(path, 0, PAGE_SIZE);
47223 +
47224 + memcpy(path, proc_sys, strlen(proc_sys));
47225 +
47226 + pos += strlen(proc_sys);
47227 +
47228 + for (; depth > 0; depth--) {
47229 + path[pos] = '/';
47230 + pos++;
47231 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47232 + if (depth == i) {
47233 + memcpy(path + pos, tmp->procname,
47234 + strlen(tmp->procname));
47235 + pos += strlen(tmp->procname);
47236 + }
47237 + i++;
47238 + }
47239 + }
47240 +
47241 + obj = gr_lookup_by_name(path, pos);
47242 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47243 +
47244 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47245 + ((err & mode) != mode))) {
47246 + __u32 new_mode = mode;
47247 +
47248 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47249 +
47250 + err = 0;
47251 + gr_log_learn_sysctl(path, new_mode);
47252 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47253 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47254 + err = -ENOENT;
47255 + } else if (!(err & GR_FIND)) {
47256 + err = -ENOENT;
47257 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47258 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47259 + path, (mode & GR_READ) ? " reading" : "",
47260 + (mode & GR_WRITE) ? " writing" : "");
47261 + err = -EACCES;
47262 + } else if ((err & mode) != mode) {
47263 + err = -EACCES;
47264 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47265 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47266 + path, (mode & GR_READ) ? " reading" : "",
47267 + (mode & GR_WRITE) ? " writing" : "");
47268 + err = 0;
47269 + } else
47270 + err = 0;
47271 +
47272 + out:
47273 + preempt_enable();
47274 +
47275 + return err;
47276 +}
47277 +#endif
47278 +
47279 +int
47280 +gr_handle_proc_ptrace(struct task_struct *task)
47281 +{
47282 + struct file *filp;
47283 + struct task_struct *tmp = task;
47284 + struct task_struct *curtemp = current;
47285 + __u32 retmode;
47286 +
47287 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47288 + if (unlikely(!(gr_status & GR_READY)))
47289 + return 0;
47290 +#endif
47291 +
47292 + read_lock(&tasklist_lock);
47293 + read_lock(&grsec_exec_file_lock);
47294 + filp = task->exec_file;
47295 +
47296 + while (tmp->pid > 0) {
47297 + if (tmp == curtemp)
47298 + break;
47299 + tmp = tmp->real_parent;
47300 + }
47301 +
47302 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47303 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47304 + read_unlock(&grsec_exec_file_lock);
47305 + read_unlock(&tasklist_lock);
47306 + return 1;
47307 + }
47308 +
47309 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47310 + if (!(gr_status & GR_READY)) {
47311 + read_unlock(&grsec_exec_file_lock);
47312 + read_unlock(&tasklist_lock);
47313 + return 0;
47314 + }
47315 +#endif
47316 +
47317 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47318 + read_unlock(&grsec_exec_file_lock);
47319 + read_unlock(&tasklist_lock);
47320 +
47321 + if (retmode & GR_NOPTRACE)
47322 + return 1;
47323 +
47324 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47325 + && (current->acl != task->acl || (current->acl != current->role->root_label
47326 + && current->pid != task->pid)))
47327 + return 1;
47328 +
47329 + return 0;
47330 +}
47331 +
47332 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47333 +{
47334 + if (unlikely(!(gr_status & GR_READY)))
47335 + return;
47336 +
47337 + if (!(current->role->roletype & GR_ROLE_GOD))
47338 + return;
47339 +
47340 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47341 + p->role->rolename, gr_task_roletype_to_char(p),
47342 + p->acl->filename);
47343 +}
47344 +
47345 +int
47346 +gr_handle_ptrace(struct task_struct *task, const long request)
47347 +{
47348 + struct task_struct *tmp = task;
47349 + struct task_struct *curtemp = current;
47350 + __u32 retmode;
47351 +
47352 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47353 + if (unlikely(!(gr_status & GR_READY)))
47354 + return 0;
47355 +#endif
47356 +
47357 + read_lock(&tasklist_lock);
47358 + while (tmp->pid > 0) {
47359 + if (tmp == curtemp)
47360 + break;
47361 + tmp = tmp->real_parent;
47362 + }
47363 +
47364 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47365 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47366 + read_unlock(&tasklist_lock);
47367 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47368 + return 1;
47369 + }
47370 + read_unlock(&tasklist_lock);
47371 +
47372 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47373 + if (!(gr_status & GR_READY))
47374 + return 0;
47375 +#endif
47376 +
47377 + read_lock(&grsec_exec_file_lock);
47378 + if (unlikely(!task->exec_file)) {
47379 + read_unlock(&grsec_exec_file_lock);
47380 + return 0;
47381 + }
47382 +
47383 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47384 + read_unlock(&grsec_exec_file_lock);
47385 +
47386 + if (retmode & GR_NOPTRACE) {
47387 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47388 + return 1;
47389 + }
47390 +
47391 + if (retmode & GR_PTRACERD) {
47392 + switch (request) {
47393 + case PTRACE_POKETEXT:
47394 + case PTRACE_POKEDATA:
47395 + case PTRACE_POKEUSR:
47396 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47397 + case PTRACE_SETREGS:
47398 + case PTRACE_SETFPREGS:
47399 +#endif
47400 +#ifdef CONFIG_X86
47401 + case PTRACE_SETFPXREGS:
47402 +#endif
47403 +#ifdef CONFIG_ALTIVEC
47404 + case PTRACE_SETVRREGS:
47405 +#endif
47406 + return 1;
47407 + default:
47408 + return 0;
47409 + }
47410 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47411 + !(current->role->roletype & GR_ROLE_GOD) &&
47412 + (current->acl != task->acl)) {
47413 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47414 + return 1;
47415 + }
47416 +
47417 + return 0;
47418 +}
47419 +
47420 +static int is_writable_mmap(const struct file *filp)
47421 +{
47422 + struct task_struct *task = current;
47423 + struct acl_object_label *obj, *obj2;
47424 +
47425 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47426 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47427 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47428 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47429 + task->role->root_label);
47430 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47431 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47432 + return 1;
47433 + }
47434 + }
47435 + return 0;
47436 +}
47437 +
47438 +int
47439 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47440 +{
47441 + __u32 mode;
47442 +
47443 + if (unlikely(!file || !(prot & PROT_EXEC)))
47444 + return 1;
47445 +
47446 + if (is_writable_mmap(file))
47447 + return 0;
47448 +
47449 + mode =
47450 + gr_search_file(file->f_path.dentry,
47451 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47452 + file->f_path.mnt);
47453 +
47454 + if (!gr_tpe_allow(file))
47455 + return 0;
47456 +
47457 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47458 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47459 + return 0;
47460 + } else if (unlikely(!(mode & GR_EXEC))) {
47461 + return 0;
47462 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47463 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47464 + return 1;
47465 + }
47466 +
47467 + return 1;
47468 +}
47469 +
47470 +int
47471 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47472 +{
47473 + __u32 mode;
47474 +
47475 + if (unlikely(!file || !(prot & PROT_EXEC)))
47476 + return 1;
47477 +
47478 + if (is_writable_mmap(file))
47479 + return 0;
47480 +
47481 + mode =
47482 + gr_search_file(file->f_path.dentry,
47483 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47484 + file->f_path.mnt);
47485 +
47486 + if (!gr_tpe_allow(file))
47487 + return 0;
47488 +
47489 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47490 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47491 + return 0;
47492 + } else if (unlikely(!(mode & GR_EXEC))) {
47493 + return 0;
47494 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47495 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47496 + return 1;
47497 + }
47498 +
47499 + return 1;
47500 +}
47501 +
47502 +void
47503 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47504 +{
47505 + unsigned long runtime;
47506 + unsigned long cputime;
47507 + unsigned int wday, cday;
47508 + __u8 whr, chr;
47509 + __u8 wmin, cmin;
47510 + __u8 wsec, csec;
47511 + struct timespec timeval;
47512 +
47513 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47514 + !(task->acl->mode & GR_PROCACCT)))
47515 + return;
47516 +
47517 + do_posix_clock_monotonic_gettime(&timeval);
47518 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47519 + wday = runtime / (3600 * 24);
47520 + runtime -= wday * (3600 * 24);
47521 + whr = runtime / 3600;
47522 + runtime -= whr * 3600;
47523 + wmin = runtime / 60;
47524 + runtime -= wmin * 60;
47525 + wsec = runtime;
47526 +
47527 + cputime = (task->utime + task->stime) / HZ;
47528 + cday = cputime / (3600 * 24);
47529 + cputime -= cday * (3600 * 24);
47530 + chr = cputime / 3600;
47531 + cputime -= chr * 3600;
47532 + cmin = cputime / 60;
47533 + cputime -= cmin * 60;
47534 + csec = cputime;
47535 +
47536 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47537 +
47538 + return;
47539 +}
47540 +
47541 +void gr_set_kernel_label(struct task_struct *task)
47542 +{
47543 + if (gr_status & GR_READY) {
47544 + task->role = kernel_role;
47545 + task->acl = kernel_role->root_label;
47546 + }
47547 + return;
47548 +}
47549 +
47550 +#ifdef CONFIG_TASKSTATS
47551 +int gr_is_taskstats_denied(int pid)
47552 +{
47553 + struct task_struct *task;
47554 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47555 + const struct cred *cred;
47556 +#endif
47557 + int ret = 0;
47558 +
47559 + /* restrict taskstats viewing to un-chrooted root users
47560 + who have the 'view' subject flag if the RBAC system is enabled
47561 + */
47562 +
47563 + rcu_read_lock();
47564 + read_lock(&tasklist_lock);
47565 + task = find_task_by_vpid(pid);
47566 + if (task) {
47567 +#ifdef CONFIG_GRKERNSEC_CHROOT
47568 + if (proc_is_chrooted(task))
47569 + ret = -EACCES;
47570 +#endif
47571 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47572 + cred = __task_cred(task);
47573 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47574 + if (cred->uid != 0)
47575 + ret = -EACCES;
47576 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47577 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47578 + ret = -EACCES;
47579 +#endif
47580 +#endif
47581 + if (gr_status & GR_READY) {
47582 + if (!(task->acl->mode & GR_VIEW))
47583 + ret = -EACCES;
47584 + }
47585 + } else
47586 + ret = -ENOENT;
47587 +
47588 + read_unlock(&tasklist_lock);
47589 + rcu_read_unlock();
47590 +
47591 + return ret;
47592 +}
47593 +#endif
47594 +
47595 +/* AUXV entries are filled via a descendant of search_binary_handler
47596 + after we've already applied the subject for the target
47597 +*/
47598 +int gr_acl_enable_at_secure(void)
47599 +{
47600 + if (unlikely(!(gr_status & GR_READY)))
47601 + return 0;
47602 +
47603 + if (current->acl->mode & GR_ATSECURE)
47604 + return 1;
47605 +
47606 + return 0;
47607 +}
47608 +
47609 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47610 +{
47611 + struct task_struct *task = current;
47612 + struct dentry *dentry = file->f_path.dentry;
47613 + struct vfsmount *mnt = file->f_path.mnt;
47614 + struct acl_object_label *obj, *tmp;
47615 + struct acl_subject_label *subj;
47616 + unsigned int bufsize;
47617 + int is_not_root;
47618 + char *path;
47619 + dev_t dev = __get_dev(dentry);
47620 +
47621 + if (unlikely(!(gr_status & GR_READY)))
47622 + return 1;
47623 +
47624 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47625 + return 1;
47626 +
47627 + /* ignore Eric Biederman */
47628 + if (IS_PRIVATE(dentry->d_inode))
47629 + return 1;
47630 +
47631 + subj = task->acl;
47632 + do {
47633 + obj = lookup_acl_obj_label(ino, dev, subj);
47634 + if (obj != NULL)
47635 + return (obj->mode & GR_FIND) ? 1 : 0;
47636 + } while ((subj = subj->parent_subject));
47637 +
47638 + /* this is purely an optimization since we're looking for an object
47639 + for the directory we're doing a readdir on.
47640 + If it's possible for any globbed object to match the entry we're
47641 + filling into the directory, then the object we find here will be
47642 + an anchor point with attached globbed objects
47643 + */
47644 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47645 + if (obj->globbed == NULL)
47646 + return (obj->mode & GR_FIND) ? 1 : 0;
47647 +
47648 + is_not_root = ((obj->filename[0] == '/') &&
47649 + (obj->filename[1] == '\0')) ? 0 : 1;
47650 + bufsize = PAGE_SIZE - namelen - is_not_root;
47651 +
47652 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
47653 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47654 + return 1;
47655 +
47656 + preempt_disable();
47657 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47658 + bufsize);
47659 +
47660 + bufsize = strlen(path);
47661 +
47662 + /* if base is "/", don't append an additional slash */
47663 + if (is_not_root)
47664 + *(path + bufsize) = '/';
47665 + memcpy(path + bufsize + is_not_root, name, namelen);
47666 + *(path + bufsize + namelen + is_not_root) = '\0';
47667 +
47668 + tmp = obj->globbed;
47669 + while (tmp) {
47670 + if (!glob_match(tmp->filename, path)) {
47671 + preempt_enable();
47672 + return (tmp->mode & GR_FIND) ? 1 : 0;
47673 + }
47674 + tmp = tmp->next;
47675 + }
47676 + preempt_enable();
47677 + return (obj->mode & GR_FIND) ? 1 : 0;
47678 +}
47679 +
47680 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47681 +EXPORT_SYMBOL(gr_acl_is_enabled);
47682 +#endif
47683 +EXPORT_SYMBOL(gr_learn_resource);
47684 +EXPORT_SYMBOL(gr_set_kernel_label);
47685 +#ifdef CONFIG_SECURITY
47686 +EXPORT_SYMBOL(gr_check_user_change);
47687 +EXPORT_SYMBOL(gr_check_group_change);
47688 +#endif
47689 +
47690 diff -urNp linux-2.6.32.41/grsecurity/gracl_cap.c linux-2.6.32.41/grsecurity/gracl_cap.c
47691 --- linux-2.6.32.41/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47692 +++ linux-2.6.32.41/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47693 @@ -0,0 +1,138 @@
47694 +#include <linux/kernel.h>
47695 +#include <linux/module.h>
47696 +#include <linux/sched.h>
47697 +#include <linux/gracl.h>
47698 +#include <linux/grsecurity.h>
47699 +#include <linux/grinternal.h>
47700 +
47701 +static const char *captab_log[] = {
47702 + "CAP_CHOWN",
47703 + "CAP_DAC_OVERRIDE",
47704 + "CAP_DAC_READ_SEARCH",
47705 + "CAP_FOWNER",
47706 + "CAP_FSETID",
47707 + "CAP_KILL",
47708 + "CAP_SETGID",
47709 + "CAP_SETUID",
47710 + "CAP_SETPCAP",
47711 + "CAP_LINUX_IMMUTABLE",
47712 + "CAP_NET_BIND_SERVICE",
47713 + "CAP_NET_BROADCAST",
47714 + "CAP_NET_ADMIN",
47715 + "CAP_NET_RAW",
47716 + "CAP_IPC_LOCK",
47717 + "CAP_IPC_OWNER",
47718 + "CAP_SYS_MODULE",
47719 + "CAP_SYS_RAWIO",
47720 + "CAP_SYS_CHROOT",
47721 + "CAP_SYS_PTRACE",
47722 + "CAP_SYS_PACCT",
47723 + "CAP_SYS_ADMIN",
47724 + "CAP_SYS_BOOT",
47725 + "CAP_SYS_NICE",
47726 + "CAP_SYS_RESOURCE",
47727 + "CAP_SYS_TIME",
47728 + "CAP_SYS_TTY_CONFIG",
47729 + "CAP_MKNOD",
47730 + "CAP_LEASE",
47731 + "CAP_AUDIT_WRITE",
47732 + "CAP_AUDIT_CONTROL",
47733 + "CAP_SETFCAP",
47734 + "CAP_MAC_OVERRIDE",
47735 + "CAP_MAC_ADMIN"
47736 +};
47737 +
47738 +EXPORT_SYMBOL(gr_is_capable);
47739 +EXPORT_SYMBOL(gr_is_capable_nolog);
47740 +
47741 +int
47742 +gr_is_capable(const int cap)
47743 +{
47744 + struct task_struct *task = current;
47745 + const struct cred *cred = current_cred();
47746 + struct acl_subject_label *curracl;
47747 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47748 + kernel_cap_t cap_audit = __cap_empty_set;
47749 +
47750 + if (!gr_acl_is_enabled())
47751 + return 1;
47752 +
47753 + curracl = task->acl;
47754 +
47755 + cap_drop = curracl->cap_lower;
47756 + cap_mask = curracl->cap_mask;
47757 + cap_audit = curracl->cap_invert_audit;
47758 +
47759 + while ((curracl = curracl->parent_subject)) {
47760 + /* if the cap isn't specified in the current computed mask but is specified in the
47761 + current level subject, and is lowered in the current level subject, then add
47762 + it to the set of dropped capabilities;
47763 + otherwise, add the current level subject's mask to the current computed mask
47764 + */
47765 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47766 + cap_raise(cap_mask, cap);
47767 + if (cap_raised(curracl->cap_lower, cap))
47768 + cap_raise(cap_drop, cap);
47769 + if (cap_raised(curracl->cap_invert_audit, cap))
47770 + cap_raise(cap_audit, cap);
47771 + }
47772 + }
47773 +
47774 + if (!cap_raised(cap_drop, cap)) {
47775 + if (cap_raised(cap_audit, cap))
47776 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47777 + return 1;
47778 + }
47779 +
47780 + curracl = task->acl;
47781 +
47782 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47783 + && cap_raised(cred->cap_effective, cap)) {
47784 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47785 + task->role->roletype, cred->uid,
47786 + cred->gid, task->exec_file ?
47787 + gr_to_filename(task->exec_file->f_path.dentry,
47788 + task->exec_file->f_path.mnt) : curracl->filename,
47789 + curracl->filename, 0UL,
47790 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
47791 + return 1;
47792 + }
47793 +
47794 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
47795 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
47796 + return 0;
47797 +}
47798 +
47799 +int
47800 +gr_is_capable_nolog(const int cap)
47801 +{
47802 + struct acl_subject_label *curracl;
47803 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47804 +
47805 + if (!gr_acl_is_enabled())
47806 + return 1;
47807 +
47808 + curracl = current->acl;
47809 +
47810 + cap_drop = curracl->cap_lower;
47811 + cap_mask = curracl->cap_mask;
47812 +
47813 + while ((curracl = curracl->parent_subject)) {
47814 + /* if the cap isn't specified in the current computed mask but is specified in the
47815 + current level subject, and is lowered in the current level subject, then add
47816 + it to the set of dropped capabilities;
47817 + otherwise, add the current level subject's mask to the current computed mask
47818 + */
47819 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47820 + cap_raise(cap_mask, cap);
47821 + if (cap_raised(curracl->cap_lower, cap))
47822 + cap_raise(cap_drop, cap);
47823 + }
47824 + }
47825 +
47826 + if (!cap_raised(cap_drop, cap))
47827 + return 1;
47828 +
47829 + return 0;
47830 +}
47831 +
47832 diff -urNp linux-2.6.32.41/grsecurity/gracl_fs.c linux-2.6.32.41/grsecurity/gracl_fs.c
47833 --- linux-2.6.32.41/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
47834 +++ linux-2.6.32.41/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
47835 @@ -0,0 +1,431 @@
47836 +#include <linux/kernel.h>
47837 +#include <linux/sched.h>
47838 +#include <linux/types.h>
47839 +#include <linux/fs.h>
47840 +#include <linux/file.h>
47841 +#include <linux/stat.h>
47842 +#include <linux/grsecurity.h>
47843 +#include <linux/grinternal.h>
47844 +#include <linux/gracl.h>
47845 +
47846 +__u32
47847 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47848 + const struct vfsmount * mnt)
47849 +{
47850 + __u32 mode;
47851 +
47852 + if (unlikely(!dentry->d_inode))
47853 + return GR_FIND;
47854 +
47855 + mode =
47856 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
47857 +
47858 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
47859 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47860 + return mode;
47861 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
47862 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47863 + return 0;
47864 + } else if (unlikely(!(mode & GR_FIND)))
47865 + return 0;
47866 +
47867 + return GR_FIND;
47868 +}
47869 +
47870 +__u32
47871 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47872 + const int fmode)
47873 +{
47874 + __u32 reqmode = GR_FIND;
47875 + __u32 mode;
47876 +
47877 + if (unlikely(!dentry->d_inode))
47878 + return reqmode;
47879 +
47880 + if (unlikely(fmode & O_APPEND))
47881 + reqmode |= GR_APPEND;
47882 + else if (unlikely(fmode & FMODE_WRITE))
47883 + reqmode |= GR_WRITE;
47884 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47885 + reqmode |= GR_READ;
47886 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
47887 + reqmode &= ~GR_READ;
47888 + mode =
47889 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47890 + mnt);
47891 +
47892 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47893 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47894 + reqmode & GR_READ ? " reading" : "",
47895 + reqmode & GR_WRITE ? " writing" : reqmode &
47896 + GR_APPEND ? " appending" : "");
47897 + return reqmode;
47898 + } else
47899 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47900 + {
47901 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47902 + reqmode & GR_READ ? " reading" : "",
47903 + reqmode & GR_WRITE ? " writing" : reqmode &
47904 + GR_APPEND ? " appending" : "");
47905 + return 0;
47906 + } else if (unlikely((mode & reqmode) != reqmode))
47907 + return 0;
47908 +
47909 + return reqmode;
47910 +}
47911 +
47912 +__u32
47913 +gr_acl_handle_creat(const struct dentry * dentry,
47914 + const struct dentry * p_dentry,
47915 + const struct vfsmount * p_mnt, const int fmode,
47916 + const int imode)
47917 +{
47918 + __u32 reqmode = GR_WRITE | GR_CREATE;
47919 + __u32 mode;
47920 +
47921 + if (unlikely(fmode & O_APPEND))
47922 + reqmode |= GR_APPEND;
47923 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47924 + reqmode |= GR_READ;
47925 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
47926 + reqmode |= GR_SETID;
47927 +
47928 + mode =
47929 + gr_check_create(dentry, p_dentry, p_mnt,
47930 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
47931 +
47932 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47933 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47934 + reqmode & GR_READ ? " reading" : "",
47935 + reqmode & GR_WRITE ? " writing" : reqmode &
47936 + GR_APPEND ? " appending" : "");
47937 + return reqmode;
47938 + } else
47939 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47940 + {
47941 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47942 + reqmode & GR_READ ? " reading" : "",
47943 + reqmode & GR_WRITE ? " writing" : reqmode &
47944 + GR_APPEND ? " appending" : "");
47945 + return 0;
47946 + } else if (unlikely((mode & reqmode) != reqmode))
47947 + return 0;
47948 +
47949 + return reqmode;
47950 +}
47951 +
47952 +__u32
47953 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
47954 + const int fmode)
47955 +{
47956 + __u32 mode, reqmode = GR_FIND;
47957 +
47958 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
47959 + reqmode |= GR_EXEC;
47960 + if (fmode & S_IWOTH)
47961 + reqmode |= GR_WRITE;
47962 + if (fmode & S_IROTH)
47963 + reqmode |= GR_READ;
47964 +
47965 + mode =
47966 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47967 + mnt);
47968 +
47969 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47970 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
47971 + reqmode & GR_READ ? " reading" : "",
47972 + reqmode & GR_WRITE ? " writing" : "",
47973 + reqmode & GR_EXEC ? " executing" : "");
47974 + return reqmode;
47975 + } else
47976 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47977 + {
47978 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
47979 + reqmode & GR_READ ? " reading" : "",
47980 + reqmode & GR_WRITE ? " writing" : "",
47981 + reqmode & GR_EXEC ? " executing" : "");
47982 + return 0;
47983 + } else if (unlikely((mode & reqmode) != reqmode))
47984 + return 0;
47985 +
47986 + return reqmode;
47987 +}
47988 +
47989 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
47990 +{
47991 + __u32 mode;
47992 +
47993 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
47994 +
47995 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
47996 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
47997 + return mode;
47998 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
47999 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48000 + return 0;
48001 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48002 + return 0;
48003 +
48004 + return (reqmode);
48005 +}
48006 +
48007 +__u32
48008 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48009 +{
48010 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48011 +}
48012 +
48013 +__u32
48014 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48015 +{
48016 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48017 +}
48018 +
48019 +__u32
48020 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48021 +{
48022 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48023 +}
48024 +
48025 +__u32
48026 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48027 +{
48028 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48029 +}
48030 +
48031 +__u32
48032 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48033 + mode_t mode)
48034 +{
48035 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48036 + return 1;
48037 +
48038 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48039 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48040 + GR_FCHMOD_ACL_MSG);
48041 + } else {
48042 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48043 + }
48044 +}
48045 +
48046 +__u32
48047 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48048 + mode_t mode)
48049 +{
48050 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48051 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48052 + GR_CHMOD_ACL_MSG);
48053 + } else {
48054 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48055 + }
48056 +}
48057 +
48058 +__u32
48059 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48060 +{
48061 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48062 +}
48063 +
48064 +__u32
48065 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48066 +{
48067 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48068 +}
48069 +
48070 +__u32
48071 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48072 +{
48073 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48074 +}
48075 +
48076 +__u32
48077 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48078 +{
48079 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48080 + GR_UNIXCONNECT_ACL_MSG);
48081 +}
48082 +
48083 +/* hardlinks require at minimum create permission,
48084 + any additional privilege required is based on the
48085 + privilege of the file being linked to
48086 +*/
48087 +__u32
48088 +gr_acl_handle_link(const struct dentry * new_dentry,
48089 + const struct dentry * parent_dentry,
48090 + const struct vfsmount * parent_mnt,
48091 + const struct dentry * old_dentry,
48092 + const struct vfsmount * old_mnt, const char *to)
48093 +{
48094 + __u32 mode;
48095 + __u32 needmode = GR_CREATE | GR_LINK;
48096 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48097 +
48098 + mode =
48099 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48100 + old_mnt);
48101 +
48102 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48103 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48104 + return mode;
48105 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48106 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48107 + return 0;
48108 + } else if (unlikely((mode & needmode) != needmode))
48109 + return 0;
48110 +
48111 + return 1;
48112 +}
48113 +
48114 +__u32
48115 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48116 + const struct dentry * parent_dentry,
48117 + const struct vfsmount * parent_mnt, const char *from)
48118 +{
48119 + __u32 needmode = GR_WRITE | GR_CREATE;
48120 + __u32 mode;
48121 +
48122 + mode =
48123 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48124 + GR_CREATE | GR_AUDIT_CREATE |
48125 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48126 +
48127 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48128 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48129 + return mode;
48130 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48131 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48132 + return 0;
48133 + } else if (unlikely((mode & needmode) != needmode))
48134 + return 0;
48135 +
48136 + return (GR_WRITE | GR_CREATE);
48137 +}
48138 +
48139 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48140 +{
48141 + __u32 mode;
48142 +
48143 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48144 +
48145 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48146 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48147 + return mode;
48148 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48149 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48150 + return 0;
48151 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48152 + return 0;
48153 +
48154 + return (reqmode);
48155 +}
48156 +
48157 +__u32
48158 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48159 + const struct dentry * parent_dentry,
48160 + const struct vfsmount * parent_mnt,
48161 + const int mode)
48162 +{
48163 + __u32 reqmode = GR_WRITE | GR_CREATE;
48164 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48165 + reqmode |= GR_SETID;
48166 +
48167 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48168 + reqmode, GR_MKNOD_ACL_MSG);
48169 +}
48170 +
48171 +__u32
48172 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48173 + const struct dentry *parent_dentry,
48174 + const struct vfsmount *parent_mnt)
48175 +{
48176 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48177 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48178 +}
48179 +
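+/* a rename is considered successful only when both the old-path and new-path
+   checks granted GR_READ and GR_WRITE */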
48180 +#define RENAME_CHECK_SUCCESS(old, new) \
48181 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48182 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48183 +
48184 +int
48185 +gr_acl_handle_rename(struct dentry *new_dentry,
48186 + struct dentry *parent_dentry,
48187 + const struct vfsmount *parent_mnt,
48188 + struct dentry *old_dentry,
48189 + struct inode *old_parent_inode,
48190 + struct vfsmount *old_mnt, const char *newname)
48191 +{
48192 + __u32 comp1, comp2;
48193 + int error = 0;
48194 +
48195 + if (unlikely(!gr_acl_is_enabled()))
48196 + return 0;
48197 +
48198 + if (!new_dentry->d_inode) {
48199 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48200 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48201 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48202 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48203 + GR_DELETE | GR_AUDIT_DELETE |
48204 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48205 + GR_SUPPRESS, old_mnt);
48206 + } else {
48207 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48208 + GR_CREATE | GR_DELETE |
48209 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48210 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48211 + GR_SUPPRESS, parent_mnt);
48212 + comp2 =
48213 + gr_search_file(old_dentry,
48214 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48215 + GR_DELETE | GR_AUDIT_DELETE |
48216 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48217 + }
48218 +
48219 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48220 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48221 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48222 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48223 + && !(comp2 & GR_SUPPRESS)) {
48224 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48225 + error = -EACCES;
48226 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48227 + error = -EACCES;
48228 +
48229 + return error;
48230 +}
48231 +
48232 +void
48233 +gr_acl_handle_exit(void)
48234 +{
48235 + u16 id;
48236 + char *rolename;
48237 + struct file *exec_file;
48238 +
48239 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48240 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48241 + id = current->acl_role_id;
48242 + rolename = current->role->rolename;
48243 + gr_set_acls(1);
48244 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48245 + }
48246 +
48247 + write_lock(&grsec_exec_file_lock);
48248 + exec_file = current->exec_file;
48249 + current->exec_file = NULL;
48250 + write_unlock(&grsec_exec_file_lock);
48251 +
48252 + if (exec_file)
48253 + fput(exec_file);
48254 +}
48255 +
48256 +int
48257 +gr_acl_handle_procpidmem(const struct task_struct *task)
48258 +{
48259 + if (unlikely(!gr_acl_is_enabled()))
48260 + return 0;
48261 +
48262 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48263 + return -EACCES;
48264 +
48265 + return 0;
48266 +}
48267 diff -urNp linux-2.6.32.41/grsecurity/gracl_ip.c linux-2.6.32.41/grsecurity/gracl_ip.c
48268 --- linux-2.6.32.41/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48269 +++ linux-2.6.32.41/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48270 @@ -0,0 +1,382 @@
48271 +#include <linux/kernel.h>
48272 +#include <asm/uaccess.h>
48273 +#include <asm/errno.h>
48274 +#include <net/sock.h>
48275 +#include <linux/file.h>
48276 +#include <linux/fs.h>
48277 +#include <linux/net.h>
48278 +#include <linux/in.h>
48279 +#include <linux/skbuff.h>
48280 +#include <linux/ip.h>
48281 +#include <linux/udp.h>
48282 +#include <linux/smp_lock.h>
48283 +#include <linux/types.h>
48284 +#include <linux/sched.h>
48285 +#include <linux/netdevice.h>
48286 +#include <linux/inetdevice.h>
48287 +#include <linux/gracl.h>
48288 +#include <linux/grsecurity.h>
48289 +#include <linux/grinternal.h>
48290 +
48291 +#define GR_BIND 0x01
48292 +#define GR_CONNECT 0x02
48293 +#define GR_INVERT 0x04
48294 +#define GR_BINDOVERRIDE 0x08
48295 +#define GR_CONNECTOVERRIDE 0x10
48296 +#define GR_SOCK_FAMILY 0x20
48297 +
48298 +static const char * gr_protocols[IPPROTO_MAX] = {
48299 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48300 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48301 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48302 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48303 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48304 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48305 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48306 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48307 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48308 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48309 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48310 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48311 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48312 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48313 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48314 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48315 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48316 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48317 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48318 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48319 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48320 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48321 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48322 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48323 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48324 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48325 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48326 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48327 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48328 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48329 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48330 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48331 + };
48332 +
48333 +static const char * gr_socktypes[SOCK_MAX] = {
48334 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48335 + "unknown:7", "unknown:8", "unknown:9", "packet"
48336 + };
48337 +
48338 +static const char * gr_sockfamilies[AF_MAX+1] = {
48339 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48340 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48341 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48342 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48343 + };
48344 +
48345 +const char *
48346 +gr_proto_to_name(unsigned char proto)
48347 +{
48348 + return gr_protocols[proto];
48349 +}
48350 +
48351 +const char *
48352 +gr_socktype_to_name(unsigned char type)
48353 +{
48354 + return gr_socktypes[type];
48355 +}
48356 +
48357 +const char *
48358 +gr_sockfamily_to_name(unsigned char family)
48359 +{
48360 + return gr_sockfamilies[family];
48361 +}
48362 +
48363 +int
48364 +gr_search_socket(const int domain, const int type, const int protocol)
48365 +{
48366 + struct acl_subject_label *curr;
48367 + const struct cred *cred = current_cred();
48368 +
48369 + if (unlikely(!gr_acl_is_enabled()))
48370 + goto exit;
48371 +
48372 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48373 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48374 + goto exit; // let the kernel handle it
48375 +
48376 + curr = current->acl;
48377 +
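+	/* sock_families is a bitmap of allowed address families: word domain / 32, bit domain % 32 */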
48378 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48379 +		/* the family is allowed; if this is PF_INET, allow it only if
48380 +		   the extra sock type/protocol checks pass */
48381 + if (domain == PF_INET)
48382 + goto inet_check;
48383 + goto exit;
48384 + } else {
48385 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48386 + __u32 fakeip = 0;
48387 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48388 + current->role->roletype, cred->uid,
48389 + cred->gid, current->exec_file ?
48390 + gr_to_filename(current->exec_file->f_path.dentry,
48391 + current->exec_file->f_path.mnt) :
48392 + curr->filename, curr->filename,
48393 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48394 + &current->signal->saved_ip);
48395 + goto exit;
48396 + }
48397 + goto exit_fail;
48398 + }
48399 +
48400 +inet_check:
48401 + /* the rest of this checking is for IPv4 only */
48402 + if (!curr->ips)
48403 + goto exit;
48404 +
48405 + if ((curr->ip_type & (1 << type)) &&
48406 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48407 + goto exit;
48408 +
48409 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48410 +		/* we don't place acls on raw sockets, and sometimes
48411 +		   dgram/ip sockets are opened for ioctl and not
48412 +		   bind/connect, so we'll fake a bind learn log */
48413 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48414 + __u32 fakeip = 0;
48415 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48416 + current->role->roletype, cred->uid,
48417 + cred->gid, current->exec_file ?
48418 + gr_to_filename(current->exec_file->f_path.dentry,
48419 + current->exec_file->f_path.mnt) :
48420 + curr->filename, curr->filename,
48421 + &fakeip, 0, type,
48422 + protocol, GR_CONNECT, &current->signal->saved_ip);
48423 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48424 + __u32 fakeip = 0;
48425 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48426 + current->role->roletype, cred->uid,
48427 + cred->gid, current->exec_file ?
48428 + gr_to_filename(current->exec_file->f_path.dentry,
48429 + current->exec_file->f_path.mnt) :
48430 + curr->filename, curr->filename,
48431 + &fakeip, 0, type,
48432 + protocol, GR_BIND, &current->signal->saved_ip);
48433 + }
48434 + /* we'll log when they use connect or bind */
48435 + goto exit;
48436 + }
48437 +
48438 +exit_fail:
48439 + if (domain == PF_INET)
48440 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48441 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48442 + else
48443 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48444 + gr_socktype_to_name(type), protocol);
48445 +
48446 + return 0;
48447 +exit:
48448 + return 1;
48449 +}
48450 +
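+/* returns 0 if the rule does not match (continue scanning), 1 if it matches and
+   allows, 2 if it matches but the rule carries GR_INVERT (explicit deny) */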
48451 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48452 +{
48453 + if ((ip->mode & mode) &&
48454 + (ip_port >= ip->low) &&
48455 + (ip_port <= ip->high) &&
48456 + ((ntohl(ip_addr) & our_netmask) ==
48457 + (ntohl(our_addr) & our_netmask))
48458 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48459 + && (ip->type & (1 << type))) {
48460 + if (ip->mode & GR_INVERT)
48461 + return 2; // specifically denied
48462 + else
48463 + return 1; // allowed
48464 + }
48465 +
48466 + return 0; // not specifically allowed, may continue parsing
48467 +}
48468 +
48469 +static int
48470 +gr_search_connectbind(const int full_mode, struct sock *sk,
48471 + struct sockaddr_in *addr, const int type)
48472 +{
48473 + char iface[IFNAMSIZ] = {0};
48474 + struct acl_subject_label *curr;
48475 + struct acl_ip_label *ip;
48476 + struct inet_sock *isk;
48477 + struct net_device *dev;
48478 + struct in_device *idev;
48479 + unsigned long i;
48480 + int ret;
48481 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48482 + __u32 ip_addr = 0;
48483 + __u32 our_addr;
48484 + __u32 our_netmask;
48485 + char *p;
48486 + __u16 ip_port = 0;
48487 + const struct cred *cred = current_cred();
48488 +
48489 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48490 + return 0;
48491 +
48492 + curr = current->acl;
48493 + isk = inet_sk(sk);
48494 +
48495 +	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
48496 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48497 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48498 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48499 + struct sockaddr_in saddr;
48500 + int err;
48501 +
48502 + saddr.sin_family = AF_INET;
48503 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48504 + saddr.sin_port = isk->sport;
48505 +
48506 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48507 + if (err)
48508 + return err;
48509 +
48510 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48511 + if (err)
48512 + return err;
48513 + }
48514 +
48515 + if (!curr->ips)
48516 + return 0;
48517 +
48518 + ip_addr = addr->sin_addr.s_addr;
48519 + ip_port = ntohs(addr->sin_port);
48520 +
48521 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48522 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48523 + current->role->roletype, cred->uid,
48524 + cred->gid, current->exec_file ?
48525 + gr_to_filename(current->exec_file->f_path.dentry,
48526 + current->exec_file->f_path.mnt) :
48527 + curr->filename, curr->filename,
48528 + &ip_addr, ip_port, type,
48529 + sk->sk_protocol, mode, &current->signal->saved_ip);
48530 + return 0;
48531 + }
48532 +
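+	/* walk the subject's ip rules: interface-bound rules are matched against each
+	   address configured on that interface, other rules against the rule's addr/netmask */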
48533 + for (i = 0; i < curr->ip_num; i++) {
48534 + ip = *(curr->ips + i);
48535 + if (ip->iface != NULL) {
48536 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48537 + p = strchr(iface, ':');
48538 + if (p != NULL)
48539 + *p = '\0';
48540 + dev = dev_get_by_name(sock_net(sk), iface);
48541 + if (dev == NULL)
48542 + continue;
48543 + idev = in_dev_get(dev);
48544 + if (idev == NULL) {
48545 + dev_put(dev);
48546 + continue;
48547 + }
48548 + rcu_read_lock();
48549 + for_ifa(idev) {
48550 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48551 + our_addr = ifa->ifa_address;
48552 + our_netmask = 0xffffffff;
48553 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48554 + if (ret == 1) {
48555 + rcu_read_unlock();
48556 + in_dev_put(idev);
48557 + dev_put(dev);
48558 + return 0;
48559 + } else if (ret == 2) {
48560 + rcu_read_unlock();
48561 + in_dev_put(idev);
48562 + dev_put(dev);
48563 + goto denied;
48564 + }
48565 + }
48566 + } endfor_ifa(idev);
48567 + rcu_read_unlock();
48568 + in_dev_put(idev);
48569 + dev_put(dev);
48570 + } else {
48571 + our_addr = ip->addr;
48572 + our_netmask = ip->netmask;
48573 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48574 + if (ret == 1)
48575 + return 0;
48576 + else if (ret == 2)
48577 + goto denied;
48578 + }
48579 + }
48580 +
48581 +denied:
48582 + if (mode == GR_BIND)
48583 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48584 + else if (mode == GR_CONNECT)
48585 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48586 +
48587 + return -EACCES;
48588 +}
48589 +
48590 +int
48591 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48592 +{
48593 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48594 +}
48595 +
48596 +int
48597 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48598 +{
48599 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48600 +}
48601 +
48602 +int gr_search_listen(struct socket *sock)
48603 +{
48604 + struct sock *sk = sock->sk;
48605 + struct sockaddr_in addr;
48606 +
48607 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48608 + addr.sin_port = inet_sk(sk)->sport;
48609 +
48610 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48611 +}
48612 +
48613 +int gr_search_accept(struct socket *sock)
48614 +{
48615 + struct sock *sk = sock->sk;
48616 + struct sockaddr_in addr;
48617 +
48618 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48619 + addr.sin_port = inet_sk(sk)->sport;
48620 +
48621 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48622 +}
48623 +
48624 +int
48625 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48626 +{
48627 + if (addr)
48628 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48629 + else {
48630 + struct sockaddr_in sin;
48631 + const struct inet_sock *inet = inet_sk(sk);
48632 +
48633 + sin.sin_addr.s_addr = inet->daddr;
48634 + sin.sin_port = inet->dport;
48635 +
48636 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48637 + }
48638 +}
48639 +
48640 +int
48641 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48642 +{
48643 + struct sockaddr_in sin;
48644 +
48645 + if (unlikely(skb->len < sizeof (struct udphdr)))
48646 + return 0; // skip this packet
48647 +
48648 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48649 + sin.sin_port = udp_hdr(skb)->source;
48650 +
48651 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48652 +}
48653 diff -urNp linux-2.6.32.41/grsecurity/gracl_learn.c linux-2.6.32.41/grsecurity/gracl_learn.c
48654 --- linux-2.6.32.41/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48655 +++ linux-2.6.32.41/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48656 @@ -0,0 +1,211 @@
48657 +#include <linux/kernel.h>
48658 +#include <linux/mm.h>
48659 +#include <linux/sched.h>
48660 +#include <linux/poll.h>
48661 +#include <linux/smp_lock.h>
48662 +#include <linux/string.h>
48663 +#include <linux/file.h>
48664 +#include <linux/types.h>
48665 +#include <linux/vmalloc.h>
48666 +#include <linux/grinternal.h>
48667 +
48668 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48669 + size_t count, loff_t *ppos);
48670 +extern int gr_acl_is_enabled(void);
48671 +
48672 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48673 +static int gr_learn_attached;
48674 +
48675 +/* use a 512k buffer */
48676 +#define LEARN_BUFFER_SIZE (512 * 1024)
48677 +
48678 +static DEFINE_SPINLOCK(gr_learn_lock);
48679 +static DEFINE_MUTEX(gr_learn_user_mutex);
48680 +
48681 +/* we need to maintain two buffers so that the kernel context of grlearn
48682 +   can take a mutex around the userspace copying, while the other kernel
48683 +   contexts use a spinlock when copying into the buffer, since they cannot sleep
48684 +*/
48685 +static char *learn_buffer;
48686 +static char *learn_buffer_user;
48687 +static int learn_buffer_len;
48688 +static int learn_buffer_user_len;
48689 +
48690 +static ssize_t
48691 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48692 +{
48693 + DECLARE_WAITQUEUE(wait, current);
48694 + ssize_t retval = 0;
48695 +
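+	/* block (interruptibly) until learn data is available; on success the loop
+	   exits holding both gr_learn_user_mutex and gr_learn_lock */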
48696 + add_wait_queue(&learn_wait, &wait);
48697 + set_current_state(TASK_INTERRUPTIBLE);
48698 + do {
48699 + mutex_lock(&gr_learn_user_mutex);
48700 + spin_lock(&gr_learn_lock);
48701 + if (learn_buffer_len)
48702 + break;
48703 + spin_unlock(&gr_learn_lock);
48704 + mutex_unlock(&gr_learn_user_mutex);
48705 + if (file->f_flags & O_NONBLOCK) {
48706 + retval = -EAGAIN;
48707 + goto out;
48708 + }
48709 + if (signal_pending(current)) {
48710 + retval = -ERESTARTSYS;
48711 + goto out;
48712 + }
48713 +
48714 + schedule();
48715 + } while (1);
48716 +
48717 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48718 + learn_buffer_user_len = learn_buffer_len;
48719 + retval = learn_buffer_len;
48720 + learn_buffer_len = 0;
48721 +
48722 + spin_unlock(&gr_learn_lock);
48723 +
48724 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48725 + retval = -EFAULT;
48726 +
48727 + mutex_unlock(&gr_learn_user_mutex);
48728 +out:
48729 + set_current_state(TASK_RUNNING);
48730 + remove_wait_queue(&learn_wait, &wait);
48731 + return retval;
48732 +}
48733 +
48734 +static unsigned int
48735 +poll_learn(struct file * file, poll_table * wait)
48736 +{
48737 + poll_wait(file, &learn_wait, wait);
48738 +
48739 + if (learn_buffer_len)
48740 + return (POLLIN | POLLRDNORM);
48741 +
48742 + return 0;
48743 +}
48744 +
48745 +void
48746 +gr_clear_learn_entries(void)
48747 +{
48748 + char *tmp;
48749 +
48750 + mutex_lock(&gr_learn_user_mutex);
48751 + if (learn_buffer != NULL) {
48752 + spin_lock(&gr_learn_lock);
48753 + tmp = learn_buffer;
48754 + learn_buffer = NULL;
48755 + spin_unlock(&gr_learn_lock);
48756 +		vfree(tmp);
48757 + }
48758 + if (learn_buffer_user != NULL) {
48759 + vfree(learn_buffer_user);
48760 + learn_buffer_user = NULL;
48761 + }
48762 + learn_buffer_len = 0;
48763 + mutex_unlock(&gr_learn_user_mutex);
48764 +
48765 + return;
48766 +}
48767 +
48768 +void
48769 +gr_add_learn_entry(const char *fmt, ...)
48770 +{
48771 + va_list args;
48772 + unsigned int len;
48773 +
48774 + if (!gr_learn_attached)
48775 + return;
48776 +
48777 + spin_lock(&gr_learn_lock);
48778 +
48779 + /* leave a gap at the end so we know when it's "full" but don't have to
48780 + compute the exact length of the string we're trying to append
48781 + */
48782 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48783 + spin_unlock(&gr_learn_lock);
48784 + wake_up_interruptible(&learn_wait);
48785 + return;
48786 + }
48787 + if (learn_buffer == NULL) {
48788 + spin_unlock(&gr_learn_lock);
48789 + return;
48790 + }
48791 +
48792 + va_start(args, fmt);
48793 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
48794 + va_end(args);
48795 +
48796 + learn_buffer_len += len + 1;
48797 +
48798 + spin_unlock(&gr_learn_lock);
48799 + wake_up_interruptible(&learn_wait);
48800 +
48801 + return;
48802 +}
48803 +
48804 +static int
48805 +open_learn(struct inode *inode, struct file *file)
48806 +{
48807 + if (file->f_mode & FMODE_READ && gr_learn_attached)
48808 + return -EBUSY;
48809 + if (file->f_mode & FMODE_READ) {
48810 + int retval = 0;
48811 + mutex_lock(&gr_learn_user_mutex);
48812 + if (learn_buffer == NULL)
48813 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
48814 + if (learn_buffer_user == NULL)
48815 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
48816 + if (learn_buffer == NULL) {
48817 + retval = -ENOMEM;
48818 + goto out_error;
48819 + }
48820 + if (learn_buffer_user == NULL) {
48821 + retval = -ENOMEM;
48822 + goto out_error;
48823 + }
48824 + learn_buffer_len = 0;
48825 + learn_buffer_user_len = 0;
48826 + gr_learn_attached = 1;
48827 +out_error:
48828 + mutex_unlock(&gr_learn_user_mutex);
48829 + return retval;
48830 + }
48831 + return 0;
48832 +}
48833 +
48834 +static int
48835 +close_learn(struct inode *inode, struct file *file)
48836 +{
48837 + char *tmp;
48838 +
48839 + if (file->f_mode & FMODE_READ) {
48840 + mutex_lock(&gr_learn_user_mutex);
48841 + if (learn_buffer != NULL) {
48842 + spin_lock(&gr_learn_lock);
48843 + tmp = learn_buffer;
48844 + learn_buffer = NULL;
48845 + spin_unlock(&gr_learn_lock);
48846 + vfree(tmp);
48847 + }
48848 + if (learn_buffer_user != NULL) {
48849 + vfree(learn_buffer_user);
48850 + learn_buffer_user = NULL;
48851 + }
48852 + learn_buffer_len = 0;
48853 + learn_buffer_user_len = 0;
48854 + gr_learn_attached = 0;
48855 + mutex_unlock(&gr_learn_user_mutex);
48856 + }
48857 +
48858 + return 0;
48859 +}
48860 +
48861 +const struct file_operations grsec_fops = {
48862 + .read = read_learn,
48863 + .write = write_grsec_handler,
48864 + .open = open_learn,
48865 + .release = close_learn,
48866 + .poll = poll_learn,
48867 +};
48868 diff -urNp linux-2.6.32.41/grsecurity/gracl_res.c linux-2.6.32.41/grsecurity/gracl_res.c
48869 --- linux-2.6.32.41/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
48870 +++ linux-2.6.32.41/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
48871 @@ -0,0 +1,67 @@
48872 +#include <linux/kernel.h>
48873 +#include <linux/sched.h>
48874 +#include <linux/gracl.h>
48875 +#include <linux/grinternal.h>
48876 +
48877 +static const char *restab_log[] = {
48878 + [RLIMIT_CPU] = "RLIMIT_CPU",
48879 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
48880 + [RLIMIT_DATA] = "RLIMIT_DATA",
48881 + [RLIMIT_STACK] = "RLIMIT_STACK",
48882 + [RLIMIT_CORE] = "RLIMIT_CORE",
48883 + [RLIMIT_RSS] = "RLIMIT_RSS",
48884 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
48885 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
48886 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
48887 + [RLIMIT_AS] = "RLIMIT_AS",
48888 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
48889 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
48890 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
48891 + [RLIMIT_NICE] = "RLIMIT_NICE",
48892 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
48893 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
48894 + [GR_CRASH_RES] = "RLIMIT_CRASH"
48895 +};
48896 +
48897 +void
48898 +gr_log_resource(const struct task_struct *task,
48899 + const int res, const unsigned long wanted, const int gt)
48900 +{
48901 + const struct cred *cred;
48902 + unsigned long rlim;
48903 +
48904 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
48905 + return;
48906 +
48907 + // not yet supported resource
48908 + if (unlikely(!restab_log[res]))
48909 + return;
48910 +
48911 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
48912 + rlim = task->signal->rlim[res].rlim_max;
48913 + else
48914 + rlim = task->signal->rlim[res].rlim_cur;
48915 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
48916 + return;
48917 +
48918 + rcu_read_lock();
48919 + cred = __task_cred(task);
48920 +
48921 + if (res == RLIMIT_NPROC &&
48922 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
48923 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
48924 + goto out_rcu_unlock;
48925 + else if (res == RLIMIT_MEMLOCK &&
48926 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
48927 + goto out_rcu_unlock;
48928 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
48929 + goto out_rcu_unlock;
48930 + rcu_read_unlock();
48931 +
48932 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
48933 +
48934 + return;
48935 +out_rcu_unlock:
48936 + rcu_read_unlock();
48937 + return;
48938 +}
48939 diff -urNp linux-2.6.32.41/grsecurity/gracl_segv.c linux-2.6.32.41/grsecurity/gracl_segv.c
48940 --- linux-2.6.32.41/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
48941 +++ linux-2.6.32.41/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
48942 @@ -0,0 +1,284 @@
48943 +#include <linux/kernel.h>
48944 +#include <linux/mm.h>
48945 +#include <asm/uaccess.h>
48946 +#include <asm/errno.h>
48947 +#include <asm/mman.h>
48948 +#include <net/sock.h>
48949 +#include <linux/file.h>
48950 +#include <linux/fs.h>
48951 +#include <linux/net.h>
48952 +#include <linux/in.h>
48953 +#include <linux/smp_lock.h>
48954 +#include <linux/slab.h>
48955 +#include <linux/types.h>
48956 +#include <linux/sched.h>
48957 +#include <linux/timer.h>
48958 +#include <linux/gracl.h>
48959 +#include <linux/grsecurity.h>
48960 +#include <linux/grinternal.h>
48961 +
48962 +static struct crash_uid *uid_set;
48963 +static unsigned short uid_used;
48964 +static DEFINE_SPINLOCK(gr_uid_lock);
48965 +extern rwlock_t gr_inode_lock;
48966 +extern struct acl_subject_label *
48967 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
48968 + struct acl_role_label *role);
48969 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
48970 +
48971 +int
48972 +gr_init_uidset(void)
48973 +{
48974 + uid_set =
48975 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
48976 + uid_used = 0;
48977 +
48978 + return uid_set ? 1 : 0;
48979 +}
48980 +
48981 +void
48982 +gr_free_uidset(void)
48983 +{
48984 + if (uid_set)
48985 + kfree(uid_set);
48986 +
48987 + return;
48988 +}
48989 +
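+/* binary search over uid_set, which is kept sorted by uid; returns the index or -1 if absent */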
48990 +int
48991 +gr_find_uid(const uid_t uid)
48992 +{
48993 + struct crash_uid *tmp = uid_set;
48994 + uid_t buid;
48995 + int low = 0, high = uid_used - 1, mid;
48996 +
48997 + while (high >= low) {
48998 + mid = (low + high) >> 1;
48999 + buid = tmp[mid].uid;
49000 + if (buid == uid)
49001 + return mid;
49002 + if (buid > uid)
49003 + high = mid - 1;
49004 + if (buid < uid)
49005 + low = mid + 1;
49006 + }
49007 +
49008 + return -1;
49009 +}
49010 +
49011 +static __inline__ void
49012 +gr_insertsort(void)
49013 +{
49014 + unsigned short i, j;
49015 + struct crash_uid index;
49016 +
49017 + for (i = 1; i < uid_used; i++) {
49018 + index = uid_set[i];
49019 + j = i;
49020 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49021 + uid_set[j] = uid_set[j - 1];
49022 + j--;
49023 + }
49024 + uid_set[j] = index;
49025 + }
49026 +
49027 + return;
49028 +}
49029 +
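+/* record or refresh the expiry for uid in the crash table; the table is re-sorted
+   so gr_find_uid()'s binary search keeps working */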
49030 +static __inline__ void
49031 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49032 +{
49033 + int loc;
49034 +
49035 + if (uid_used == GR_UIDTABLE_MAX)
49036 + return;
49037 +
49038 + loc = gr_find_uid(uid);
49039 +
49040 + if (loc >= 0) {
49041 + uid_set[loc].expires = expires;
49042 + return;
49043 + }
49044 +
49045 + uid_set[uid_used].uid = uid;
49046 + uid_set[uid_used].expires = expires;
49047 + uid_used++;
49048 +
49049 + gr_insertsort();
49050 +
49051 + return;
49052 +}
49053 +
49054 +void
49055 +gr_remove_uid(const unsigned short loc)
49056 +{
49057 + unsigned short i;
49058 +
49059 + for (i = loc + 1; i < uid_used; i++)
49060 + uid_set[i - 1] = uid_set[i];
49061 +
49062 + uid_used--;
49063 +
49064 + return;
49065 +}
49066 +
49067 +int
49068 +gr_check_crash_uid(const uid_t uid)
49069 +{
49070 + int loc;
49071 + int ret = 0;
49072 +
49073 + if (unlikely(!gr_acl_is_enabled()))
49074 + return 0;
49075 +
49076 + spin_lock(&gr_uid_lock);
49077 + loc = gr_find_uid(uid);
49078 +
49079 + if (loc < 0)
49080 + goto out_unlock;
49081 +
49082 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49083 + gr_remove_uid(loc);
49084 + else
49085 + ret = 1;
49086 +
49087 +out_unlock:
49088 + spin_unlock(&gr_uid_lock);
49089 + return ret;
49090 +}
49091 +
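+/* returns 1 if the task's uids or gids are not all equal, i.e. it is running set[ug]id */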
49092 +static __inline__ int
49093 +proc_is_setxid(const struct cred *cred)
49094 +{
49095 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49096 + cred->uid != cred->fsuid)
49097 + return 1;
49098 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49099 + cred->gid != cred->fsgid)
49100 + return 1;
49101 +
49102 + return 0;
49103 +}
49104 +
49105 +void
49106 +gr_handle_crash(struct task_struct *task, const int sig)
49107 +{
49108 + struct acl_subject_label *curr;
49109 + struct acl_subject_label *curr2;
49110 + struct task_struct *tsk, *tsk2;
49111 + const struct cred *cred;
49112 + const struct cred *cred2;
49113 +
49114 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49115 + return;
49116 +
49117 + if (unlikely(!gr_acl_is_enabled()))
49118 + return;
49119 +
49120 + curr = task->acl;
49121 +
49122 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49123 + return;
49124 +
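+	/* res[GR_CRASH_RES] limits a subject to rlim_cur crashes per rlim_max-second window */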
49125 + if (time_before_eq(curr->expires, get_seconds())) {
49126 + curr->expires = 0;
49127 + curr->crashes = 0;
49128 + }
49129 +
49130 + curr->crashes++;
49131 +
49132 + if (!curr->expires)
49133 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49134 +
49135 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49136 + time_after(curr->expires, get_seconds())) {
49137 + rcu_read_lock();
49138 + cred = __task_cred(task);
49139 + if (cred->uid && proc_is_setxid(cred)) {
49140 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49141 + spin_lock(&gr_uid_lock);
49142 + gr_insert_uid(cred->uid, curr->expires);
49143 + spin_unlock(&gr_uid_lock);
49144 + curr->expires = 0;
49145 + curr->crashes = 0;
49146 + read_lock(&tasklist_lock);
49147 + do_each_thread(tsk2, tsk) {
49148 + cred2 = __task_cred(tsk);
49149 + if (tsk != task && cred2->uid == cred->uid)
49150 + gr_fake_force_sig(SIGKILL, tsk);
49151 + } while_each_thread(tsk2, tsk);
49152 + read_unlock(&tasklist_lock);
49153 + } else {
49154 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49155 + read_lock(&tasklist_lock);
49156 + do_each_thread(tsk2, tsk) {
49157 + if (likely(tsk != task)) {
49158 + curr2 = tsk->acl;
49159 +
49160 + if (curr2->device == curr->device &&
49161 + curr2->inode == curr->inode)
49162 + gr_fake_force_sig(SIGKILL, tsk);
49163 + }
49164 + } while_each_thread(tsk2, tsk);
49165 + read_unlock(&tasklist_lock);
49166 + }
49167 + rcu_read_unlock();
49168 + }
49169 +
49170 + return;
49171 +}
49172 +
49173 +int
49174 +gr_check_crash_exec(const struct file *filp)
49175 +{
49176 + struct acl_subject_label *curr;
49177 +
49178 + if (unlikely(!gr_acl_is_enabled()))
49179 + return 0;
49180 +
49181 + read_lock(&gr_inode_lock);
49182 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49183 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49184 + current->role);
49185 + read_unlock(&gr_inode_lock);
49186 +
49187 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49188 + (!curr->crashes && !curr->expires))
49189 + return 0;
49190 +
49191 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49192 + time_after(curr->expires, get_seconds()))
49193 + return 1;
49194 + else if (time_before_eq(curr->expires, get_seconds())) {
49195 + curr->crashes = 0;
49196 + curr->expires = 0;
49197 + }
49198 +
49199 + return 0;
49200 +}
49201 +
49202 +void
49203 +gr_handle_alertkill(struct task_struct *task)
49204 +{
49205 + struct acl_subject_label *curracl;
49206 + __u32 curr_ip;
49207 + struct task_struct *p, *p2;
49208 +
49209 + if (unlikely(!gr_acl_is_enabled()))
49210 + return;
49211 +
49212 + curracl = task->acl;
49213 + curr_ip = task->signal->curr_ip;
49214 +
49215 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49216 + read_lock(&tasklist_lock);
49217 + do_each_thread(p2, p) {
49218 + if (p->signal->curr_ip == curr_ip)
49219 + gr_fake_force_sig(SIGKILL, p);
49220 + } while_each_thread(p2, p);
49221 + read_unlock(&tasklist_lock);
49222 + } else if (curracl->mode & GR_KILLPROC)
49223 + gr_fake_force_sig(SIGKILL, task);
49224 +
49225 + return;
49226 +}
49227 diff -urNp linux-2.6.32.41/grsecurity/gracl_shm.c linux-2.6.32.41/grsecurity/gracl_shm.c
49228 --- linux-2.6.32.41/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49229 +++ linux-2.6.32.41/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49230 @@ -0,0 +1,40 @@
49231 +#include <linux/kernel.h>
49232 +#include <linux/mm.h>
49233 +#include <linux/sched.h>
49234 +#include <linux/file.h>
49235 +#include <linux/ipc.h>
49236 +#include <linux/gracl.h>
49237 +#include <linux/grsecurity.h>
49238 +#include <linux/grinternal.h>
49239 +
49240 +int
49241 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49242 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49243 +{
49244 + struct task_struct *task;
49245 +
49246 + if (!gr_acl_is_enabled())
49247 + return 1;
49248 +
49249 + rcu_read_lock();
49250 + read_lock(&tasklist_lock);
49251 +
49252 + task = find_task_by_vpid(shm_cprid);
49253 +
49254 + if (unlikely(!task))
49255 + task = find_task_by_vpid(shm_lapid);
49256 +
49257 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49258 + (task->pid == shm_lapid)) &&
49259 + (task->acl->mode & GR_PROTSHM) &&
49260 + (task->acl != current->acl))) {
49261 + read_unlock(&tasklist_lock);
49262 + rcu_read_unlock();
49263 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49264 + return 0;
49265 + }
49266 + read_unlock(&tasklist_lock);
49267 + rcu_read_unlock();
49268 +
49269 + return 1;
49270 +}
49271 diff -urNp linux-2.6.32.41/grsecurity/grsec_chdir.c linux-2.6.32.41/grsecurity/grsec_chdir.c
49272 --- linux-2.6.32.41/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49273 +++ linux-2.6.32.41/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49274 @@ -0,0 +1,19 @@
49275 +#include <linux/kernel.h>
49276 +#include <linux/sched.h>
49277 +#include <linux/fs.h>
49278 +#include <linux/file.h>
49279 +#include <linux/grsecurity.h>
49280 +#include <linux/grinternal.h>
49281 +
49282 +void
49283 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49284 +{
49285 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49286 + if ((grsec_enable_chdir && grsec_enable_group &&
49287 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49288 + !grsec_enable_group)) {
49289 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49290 + }
49291 +#endif
49292 + return;
49293 +}
49294 diff -urNp linux-2.6.32.41/grsecurity/grsec_chroot.c linux-2.6.32.41/grsecurity/grsec_chroot.c
49295 --- linux-2.6.32.41/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49296 +++ linux-2.6.32.41/grsecurity/grsec_chroot.c 2011-04-17 15:56:46.000000000 -0400
49297 @@ -0,0 +1,395 @@
49298 +#include <linux/kernel.h>
49299 +#include <linux/module.h>
49300 +#include <linux/sched.h>
49301 +#include <linux/file.h>
49302 +#include <linux/fs.h>
49303 +#include <linux/mount.h>
49304 +#include <linux/types.h>
49305 +#include <linux/pid_namespace.h>
49306 +#include <linux/grsecurity.h>
49307 +#include <linux/grinternal.h>
49308 +
49309 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49310 +{
49311 +#ifdef CONFIG_GRKERNSEC
49312 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49313 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49314 + task->gr_is_chrooted = 1;
49315 + else
49316 + task->gr_is_chrooted = 0;
49317 +
49318 + task->gr_chroot_dentry = path->dentry;
49319 +#endif
49320 + return;
49321 +}
49322 +
49323 +void gr_clear_chroot_entries(struct task_struct *task)
49324 +{
49325 +#ifdef CONFIG_GRKERNSEC
49326 + task->gr_is_chrooted = 0;
49327 + task->gr_chroot_dentry = NULL;
49328 +#endif
49329 + return;
49330 +}
49331 +
49332 +int
49333 +gr_handle_chroot_unix(const pid_t pid)
49334 +{
49335 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49336 + struct pid *spid = NULL;
49337 +
49338 + if (unlikely(!grsec_enable_chroot_unix))
49339 + return 1;
49340 +
49341 + if (likely(!proc_is_chrooted(current)))
49342 + return 1;
49343 +
49344 + rcu_read_lock();
49345 + read_lock(&tasklist_lock);
49346 +
49347 + spid = find_vpid(pid);
49348 + if (spid) {
49349 + struct task_struct *p;
49350 + p = pid_task(spid, PIDTYPE_PID);
49351 + if (unlikely(p && !have_same_root(current, p))) {
49352 + read_unlock(&tasklist_lock);
49353 + rcu_read_unlock();
49354 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49355 + return 0;
49356 + }
49357 + }
49358 + read_unlock(&tasklist_lock);
49359 + rcu_read_unlock();
49360 +#endif
49361 + return 1;
49362 +}
49363 +
49364 +int
49365 +gr_handle_chroot_nice(void)
49366 +{
49367 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49368 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49369 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49370 + return -EPERM;
49371 + }
49372 +#endif
49373 + return 0;
49374 +}
49375 +
49376 +int
49377 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49378 +{
49379 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49380 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49381 + && proc_is_chrooted(current)) {
49382 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49383 + return -EACCES;
49384 + }
49385 +#endif
49386 + return 0;
49387 +}
49388 +
49389 +int
49390 +gr_handle_chroot_rawio(const struct inode *inode)
49391 +{
49392 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49393 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49394 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49395 + return 1;
49396 +#endif
49397 + return 0;
49398 +}
49399 +
49400 +int
49401 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49402 +{
49403 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49404 + struct task_struct *p;
49405 + int ret = 0;
49406 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49407 + return ret;
49408 +
49409 + read_lock(&tasklist_lock);
49410 + do_each_pid_task(pid, type, p) {
49411 + if (!have_same_root(current, p)) {
49412 + ret = 1;
49413 + goto out;
49414 + }
49415 + } while_each_pid_task(pid, type, p);
49416 +out:
49417 + read_unlock(&tasklist_lock);
49418 + return ret;
49419 +#endif
49420 + return 0;
49421 +}
49422 +
49423 +int
49424 +gr_pid_is_chrooted(struct task_struct *p)
49425 +{
49426 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49427 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49428 + return 0;
49429 +
49430 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49431 + !have_same_root(current, p)) {
49432 + return 1;
49433 + }
49434 +#endif
49435 + return 0;
49436 +}
49437 +
49438 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49439 +
49440 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
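+/* walks up from u_dentry across mount points; returns 0 when the walk reaches the
+   real (init) root, i.e. the access is outside of the chroot, 1 otherwise */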
49441 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49442 +{
49443 + struct dentry *dentry = (struct dentry *)u_dentry;
49444 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49445 + struct dentry *realroot;
49446 + struct vfsmount *realrootmnt;
49447 + struct dentry *currentroot;
49448 + struct vfsmount *currentmnt;
49449 + struct task_struct *reaper = &init_task;
49450 + int ret = 1;
49451 +
49452 + read_lock(&reaper->fs->lock);
49453 + realrootmnt = mntget(reaper->fs->root.mnt);
49454 + realroot = dget(reaper->fs->root.dentry);
49455 + read_unlock(&reaper->fs->lock);
49456 +
49457 + read_lock(&current->fs->lock);
49458 + currentmnt = mntget(current->fs->root.mnt);
49459 + currentroot = dget(current->fs->root.dentry);
49460 + read_unlock(&current->fs->lock);
49461 +
49462 + spin_lock(&dcache_lock);
49463 + for (;;) {
49464 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49465 + || (dentry == currentroot && mnt == currentmnt)))
49466 + break;
49467 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49468 + if (mnt->mnt_parent == mnt)
49469 + break;
49470 + dentry = mnt->mnt_mountpoint;
49471 + mnt = mnt->mnt_parent;
49472 + continue;
49473 + }
49474 + dentry = dentry->d_parent;
49475 + }
49476 + spin_unlock(&dcache_lock);
49477 +
49478 + dput(currentroot);
49479 + mntput(currentmnt);
49480 +
49481 + /* access is outside of chroot */
49482 + if (dentry == realroot && mnt == realrootmnt)
49483 + ret = 0;
49484 +
49485 + dput(realroot);
49486 + mntput(realrootmnt);
49487 + return ret;
49488 +}
49489 +#endif
49490 +
49491 +int
49492 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49493 +{
49494 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49495 + if (!grsec_enable_chroot_fchdir)
49496 + return 1;
49497 +
49498 + if (!proc_is_chrooted(current))
49499 + return 1;
49500 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49501 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49502 + return 0;
49503 + }
49504 +#endif
49505 + return 1;
49506 +}
49507 +
49508 +int
49509 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49510 + const time_t shm_createtime)
49511 +{
49512 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49513 + struct pid *pid = NULL;
49514 + time_t starttime;
49515 +
49516 + if (unlikely(!grsec_enable_chroot_shmat))
49517 + return 1;
49518 +
49519 + if (likely(!proc_is_chrooted(current)))
49520 + return 1;
49521 +
49522 + rcu_read_lock();
49523 + read_lock(&tasklist_lock);
49524 +
49525 + pid = find_vpid(shm_cprid);
49526 + if (pid) {
49527 + struct task_struct *p;
49528 + p = pid_task(pid, PIDTYPE_PID);
49529 + if (p == NULL)
49530 + goto unlock;
49531 + starttime = p->start_time.tv_sec;
49532 + if (unlikely(!have_same_root(current, p) &&
49533 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49534 + read_unlock(&tasklist_lock);
49535 + rcu_read_unlock();
49536 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49537 + return 0;
49538 + }
49539 + } else {
49540 + pid = find_vpid(shm_lapid);
49541 + if (pid) {
49542 + struct task_struct *p;
49543 + p = pid_task(pid, PIDTYPE_PID);
49544 + if (p == NULL)
49545 + goto unlock;
49546 + if (unlikely(!have_same_root(current, p))) {
49547 + read_unlock(&tasklist_lock);
49548 + rcu_read_unlock();
49549 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49550 + return 0;
49551 + }
49552 + }
49553 + }
49554 +
49555 +unlock:
49556 + read_unlock(&tasklist_lock);
49557 + rcu_read_unlock();
49558 +#endif
49559 + return 1;
49560 +}
49561 +
49562 +void
49563 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49564 +{
49565 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49566 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49567 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49568 +#endif
49569 + return;
49570 +}
49571 +
49572 +int
49573 +gr_handle_chroot_mknod(const struct dentry *dentry,
49574 + const struct vfsmount *mnt, const int mode)
49575 +{
49576 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49577 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49578 + proc_is_chrooted(current)) {
49579 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49580 + return -EPERM;
49581 + }
49582 +#endif
49583 + return 0;
49584 +}
49585 +
49586 +int
49587 +gr_handle_chroot_mount(const struct dentry *dentry,
49588 + const struct vfsmount *mnt, const char *dev_name)
49589 +{
49590 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49591 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49592 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
49593 + return -EPERM;
49594 + }
49595 +#endif
49596 + return 0;
49597 +}
49598 +
49599 +int
49600 +gr_handle_chroot_pivot(void)
49601 +{
49602 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49603 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49604 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49605 + return -EPERM;
49606 + }
49607 +#endif
49608 + return 0;
49609 +}
49610 +
49611 +int
49612 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49613 +{
49614 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49615 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49616 + !gr_is_outside_chroot(dentry, mnt)) {
49617 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49618 + return -EPERM;
49619 + }
49620 +#endif
49621 + return 0;
49622 +}
49623 +
49624 +int
49625 +gr_handle_chroot_caps(struct path *path)
49626 +{
49627 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49628 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49629 + (init_task.fs->root.dentry != path->dentry) &&
49630 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49631 +
49632 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49633 + const struct cred *old = current_cred();
49634 + struct cred *new = prepare_creds();
49635 + if (new == NULL)
49636 + return 1;
49637 +
49638 + new->cap_permitted = cap_drop(old->cap_permitted,
49639 + chroot_caps);
49640 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49641 + chroot_caps);
49642 + new->cap_effective = cap_drop(old->cap_effective,
49643 + chroot_caps);
49644 +
49645 + commit_creds(new);
49646 +
49647 + return 0;
49648 + }
49649 +#endif
49650 + return 0;
49651 +}
49652 +
49653 +int
49654 +gr_handle_chroot_sysctl(const int op)
49655 +{
49656 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49657 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49658 + && (op & MAY_WRITE))
49659 + return -EACCES;
49660 +#endif
49661 + return 0;
49662 +}
49663 +
49664 +void
49665 +gr_handle_chroot_chdir(struct path *path)
49666 +{
49667 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49668 + if (grsec_enable_chroot_chdir)
49669 + set_fs_pwd(current->fs, path);
49670 +#endif
49671 + return;
49672 +}
49673 +
49674 +int
49675 +gr_handle_chroot_chmod(const struct dentry *dentry,
49676 + const struct vfsmount *mnt, const int mode)
49677 +{
49678 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49679 + /* allow chmod +s on directories, but not on files */
49680 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49681 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49682 + proc_is_chrooted(current)) {
49683 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49684 + return -EPERM;
49685 + }
49686 +#endif
49687 + return 0;
49688 +}
49689 +
49690 +#ifdef CONFIG_SECURITY
49691 +EXPORT_SYMBOL(gr_handle_chroot_caps);
49692 +#endif
49693 diff -urNp linux-2.6.32.41/grsecurity/grsec_disabled.c linux-2.6.32.41/grsecurity/grsec_disabled.c
49694 --- linux-2.6.32.41/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49695 +++ linux-2.6.32.41/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49696 @@ -0,0 +1,447 @@
49697 +#include <linux/kernel.h>
49698 +#include <linux/module.h>
49699 +#include <linux/sched.h>
49700 +#include <linux/file.h>
49701 +#include <linux/fs.h>
49702 +#include <linux/kdev_t.h>
49703 +#include <linux/net.h>
49704 +#include <linux/in.h>
49705 +#include <linux/ip.h>
49706 +#include <linux/skbuff.h>
49707 +#include <linux/sysctl.h>
49708 +
49709 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49710 +void
49711 +pax_set_initial_flags(struct linux_binprm *bprm)
49712 +{
49713 + return;
49714 +}
49715 +#endif
49716 +
49717 +#ifdef CONFIG_SYSCTL
49718 +__u32
49719 +gr_handle_sysctl(const struct ctl_table * table, const int op)
49720 +{
49721 + return 0;
49722 +}
49723 +#endif
49724 +
49725 +#ifdef CONFIG_TASKSTATS
49726 +int gr_is_taskstats_denied(int pid)
49727 +{
49728 + return 0;
49729 +}
49730 +#endif
49731 +
49732 +int
49733 +gr_acl_is_enabled(void)
49734 +{
49735 + return 0;
49736 +}
49737 +
49738 +int
49739 +gr_handle_rawio(const struct inode *inode)
49740 +{
49741 + return 0;
49742 +}
49743 +
49744 +void
49745 +gr_acl_handle_psacct(struct task_struct *task, const long code)
49746 +{
49747 + return;
49748 +}
49749 +
49750 +int
49751 +gr_handle_ptrace(struct task_struct *task, const long request)
49752 +{
49753 + return 0;
49754 +}
49755 +
49756 +int
49757 +gr_handle_proc_ptrace(struct task_struct *task)
49758 +{
49759 + return 0;
49760 +}
49761 +
49762 +void
49763 +gr_learn_resource(const struct task_struct *task,
49764 + const int res, const unsigned long wanted, const int gt)
49765 +{
49766 + return;
49767 +}
49768 +
49769 +int
49770 +gr_set_acls(const int type)
49771 +{
49772 + return 0;
49773 +}
49774 +
49775 +int
49776 +gr_check_hidden_task(const struct task_struct *tsk)
49777 +{
49778 + return 0;
49779 +}
49780 +
49781 +int
49782 +gr_check_protected_task(const struct task_struct *task)
49783 +{
49784 + return 0;
49785 +}
49786 +
49787 +int
49788 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49789 +{
49790 + return 0;
49791 +}
49792 +
49793 +void
49794 +gr_copy_label(struct task_struct *tsk)
49795 +{
49796 + return;
49797 +}
49798 +
49799 +void
49800 +gr_set_pax_flags(struct task_struct *task)
49801 +{
49802 + return;
49803 +}
49804 +
49805 +int
49806 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49807 + const int unsafe_share)
49808 +{
49809 + return 0;
49810 +}
49811 +
49812 +void
49813 +gr_handle_delete(const ino_t ino, const dev_t dev)
49814 +{
49815 + return;
49816 +}
49817 +
49818 +void
49819 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49820 +{
49821 + return;
49822 +}
49823 +
49824 +void
49825 +gr_handle_crash(struct task_struct *task, const int sig)
49826 +{
49827 + return;
49828 +}
49829 +
49830 +int
49831 +gr_check_crash_exec(const struct file *filp)
49832 +{
49833 + return 0;
49834 +}
49835 +
49836 +int
49837 +gr_check_crash_uid(const uid_t uid)
49838 +{
49839 + return 0;
49840 +}
49841 +
49842 +void
49843 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49844 + struct dentry *old_dentry,
49845 + struct dentry *new_dentry,
49846 + struct vfsmount *mnt, const __u8 replace)
49847 +{
49848 + return;
49849 +}
49850 +
49851 +int
49852 +gr_search_socket(const int family, const int type, const int protocol)
49853 +{
49854 + return 1;
49855 +}
49856 +
49857 +int
49858 +gr_search_connectbind(const int mode, const struct socket *sock,
49859 + const struct sockaddr_in *addr)
49860 +{
49861 + return 0;
49862 +}
49863 +
49864 +int
49865 +gr_is_capable(const int cap)
49866 +{
49867 + return 1;
49868 +}
49869 +
49870 +int
49871 +gr_is_capable_nolog(const int cap)
49872 +{
49873 + return 1;
49874 +}
49875 +
49876 +void
49877 +gr_handle_alertkill(struct task_struct *task)
49878 +{
49879 + return;
49880 +}
49881 +
49882 +__u32
49883 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
49884 +{
49885 + return 1;
49886 +}
49887 +
49888 +__u32
49889 +gr_acl_handle_hidden_file(const struct dentry * dentry,
49890 + const struct vfsmount * mnt)
49891 +{
49892 + return 1;
49893 +}
49894 +
49895 +__u32
49896 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
49897 + const int fmode)
49898 +{
49899 + return 1;
49900 +}
49901 +
49902 +__u32
49903 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49904 +{
49905 + return 1;
49906 +}
49907 +
49908 +__u32
49909 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
49910 +{
49911 + return 1;
49912 +}
49913 +
49914 +int
49915 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
49916 + unsigned int *vm_flags)
49917 +{
49918 + return 1;
49919 +}
49920 +
49921 +__u32
49922 +gr_acl_handle_truncate(const struct dentry * dentry,
49923 + const struct vfsmount * mnt)
49924 +{
49925 + return 1;
49926 +}
49927 +
49928 +__u32
49929 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
49930 +{
49931 + return 1;
49932 +}
49933 +
49934 +__u32
49935 +gr_acl_handle_access(const struct dentry * dentry,
49936 + const struct vfsmount * mnt, const int fmode)
49937 +{
49938 + return 1;
49939 +}
49940 +
49941 +__u32
49942 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
49943 + mode_t mode)
49944 +{
49945 + return 1;
49946 +}
49947 +
49948 +__u32
49949 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
49950 + mode_t mode)
49951 +{
49952 + return 1;
49953 +}
49954 +
49955 +__u32
49956 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
49957 +{
49958 + return 1;
49959 +}
49960 +
49961 +__u32
49962 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
49963 +{
49964 + return 1;
49965 +}
49966 +
49967 +void
49968 +grsecurity_init(void)
49969 +{
49970 + return;
49971 +}
49972 +
49973 +__u32
49974 +gr_acl_handle_mknod(const struct dentry * new_dentry,
49975 + const struct dentry * parent_dentry,
49976 + const struct vfsmount * parent_mnt,
49977 + const int mode)
49978 +{
49979 + return 1;
49980 +}
49981 +
49982 +__u32
49983 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
49984 + const struct dentry * parent_dentry,
49985 + const struct vfsmount * parent_mnt)
49986 +{
49987 + return 1;
49988 +}
49989 +
49990 +__u32
49991 +gr_acl_handle_symlink(const struct dentry * new_dentry,
49992 + const struct dentry * parent_dentry,
49993 + const struct vfsmount * parent_mnt, const char *from)
49994 +{
49995 + return 1;
49996 +}
49997 +
49998 +__u32
49999 +gr_acl_handle_link(const struct dentry * new_dentry,
50000 + const struct dentry * parent_dentry,
50001 + const struct vfsmount * parent_mnt,
50002 + const struct dentry * old_dentry,
50003 + const struct vfsmount * old_mnt, const char *to)
50004 +{
50005 + return 1;
50006 +}
50007 +
50008 +int
50009 +gr_acl_handle_rename(const struct dentry *new_dentry,
50010 + const struct dentry *parent_dentry,
50011 + const struct vfsmount *parent_mnt,
50012 + const struct dentry *old_dentry,
50013 + const struct inode *old_parent_inode,
50014 + const struct vfsmount *old_mnt, const char *newname)
50015 +{
50016 + return 0;
50017 +}
50018 +
50019 +int
50020 +gr_acl_handle_filldir(const struct file *file, const char *name,
50021 + const int namelen, const ino_t ino)
50022 +{
50023 + return 1;
50024 +}
50025 +
50026 +int
50027 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50028 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50029 +{
50030 + return 1;
50031 +}
50032 +
50033 +int
50034 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50035 +{
50036 + return 0;
50037 +}
50038 +
50039 +int
50040 +gr_search_accept(const struct socket *sock)
50041 +{
50042 + return 0;
50043 +}
50044 +
50045 +int
50046 +gr_search_listen(const struct socket *sock)
50047 +{
50048 + return 0;
50049 +}
50050 +
50051 +int
50052 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50053 +{
50054 + return 0;
50055 +}
50056 +
50057 +__u32
50058 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50059 +{
50060 + return 1;
50061 +}
50062 +
50063 +__u32
50064 +gr_acl_handle_creat(const struct dentry * dentry,
50065 + const struct dentry * p_dentry,
50066 + const struct vfsmount * p_mnt, const int fmode,
50067 + const int imode)
50068 +{
50069 + return 1;
50070 +}
50071 +
50072 +void
50073 +gr_acl_handle_exit(void)
50074 +{
50075 + return;
50076 +}
50077 +
50078 +int
50079 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50080 +{
50081 + return 1;
50082 +}
50083 +
50084 +void
50085 +gr_set_role_label(const uid_t uid, const gid_t gid)
50086 +{
50087 + return;
50088 +}
50089 +
50090 +int
50091 +gr_acl_handle_procpidmem(const struct task_struct *task)
50092 +{
50093 + return 0;
50094 +}
50095 +
50096 +int
50097 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50098 +{
50099 + return 0;
50100 +}
50101 +
50102 +int
50103 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50104 +{
50105 + return 0;
50106 +}
50107 +
50108 +void
50109 +gr_set_kernel_label(struct task_struct *task)
50110 +{
50111 + return;
50112 +}
50113 +
50114 +int
50115 +gr_check_user_change(int real, int effective, int fs)
50116 +{
50117 + return 0;
50118 +}
50119 +
50120 +int
50121 +gr_check_group_change(int real, int effective, int fs)
50122 +{
50123 + return 0;
50124 +}
50125 +
50126 +int gr_acl_enable_at_secure(void)
50127 +{
50128 + return 0;
50129 +}
50130 +
50131 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50132 +{
50133 + return dentry->d_inode->i_sb->s_dev;
50134 +}
50135 +
50136 +EXPORT_SYMBOL(gr_is_capable);
50137 +EXPORT_SYMBOL(gr_is_capable_nolog);
50138 +EXPORT_SYMBOL(gr_learn_resource);
50139 +EXPORT_SYMBOL(gr_set_kernel_label);
50140 +#ifdef CONFIG_SECURITY
50141 +EXPORT_SYMBOL(gr_check_user_change);
50142 +EXPORT_SYMBOL(gr_check_group_change);
50143 +#endif
50144 diff -urNp linux-2.6.32.41/grsecurity/grsec_exec.c linux-2.6.32.41/grsecurity/grsec_exec.c
50145 --- linux-2.6.32.41/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50146 +++ linux-2.6.32.41/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50147 @@ -0,0 +1,148 @@
50148 +#include <linux/kernel.h>
50149 +#include <linux/sched.h>
50150 +#include <linux/file.h>
50151 +#include <linux/binfmts.h>
50152 +#include <linux/smp_lock.h>
50153 +#include <linux/fs.h>
50154 +#include <linux/types.h>
50155 +#include <linux/grdefs.h>
50156 +#include <linux/grinternal.h>
50157 +#include <linux/capability.h>
50158 +#include <linux/compat.h>
50159 +
50160 +#include <asm/uaccess.h>
50161 +
50162 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50163 +static char gr_exec_arg_buf[132];
50164 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50165 +#endif
50166 +
50167 +int
50168 +gr_handle_nproc(void)
50169 +{
50170 +#ifdef CONFIG_GRKERNSEC_EXECVE
50171 + const struct cred *cred = current_cred();
50172 + if (grsec_enable_execve && cred->user &&
50173 + (atomic_read(&cred->user->processes) >
50174 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50175 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50176 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50177 + return -EAGAIN;
50178 + }
50179 +#endif
50180 + return 0;
50181 +}
50182 +
50183 +void
50184 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50185 +{
50186 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50187 + char *grarg = gr_exec_arg_buf;
50188 + unsigned int i, x, execlen = 0;
50189 + char c;
50190 +
50191 + if (!((grsec_enable_execlog && grsec_enable_group &&
50192 + in_group_p(grsec_audit_gid))
50193 + || (grsec_enable_execlog && !grsec_enable_group)))
50194 + return;
50195 +
50196 + mutex_lock(&gr_exec_arg_mutex);
50197 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50198 +
50199 + if (unlikely(argv == NULL))
50200 + goto log;
50201 +
50202 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50203 + const char __user *p;
50204 + unsigned int len;
50205 +
50206 + if (copy_from_user(&p, argv + i, sizeof(p)))
50207 + goto log;
50208 + if (!p)
50209 + goto log;
50210 + len = strnlen_user(p, 128 - execlen);
50211 + if (len > 128 - execlen)
50212 + len = 128 - execlen;
50213 + else if (len > 0)
50214 + len--;
50215 + if (copy_from_user(grarg + execlen, p, len))
50216 + goto log;
50217 +
50218 + /* rewrite unprintable characters */
50219 + for (x = 0; x < len; x++) {
50220 + c = *(grarg + execlen + x);
50221 + if (c < 32 || c > 126)
50222 + *(grarg + execlen + x) = ' ';
50223 + }
50224 +
50225 + execlen += len;
50226 + *(grarg + execlen) = ' ';
50227 + *(grarg + execlen + 1) = '\0';
50228 + execlen++;
50229 + }
50230 +
50231 + log:
50232 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50233 + bprm->file->f_path.mnt, grarg);
50234 + mutex_unlock(&gr_exec_arg_mutex);
50235 +#endif
50236 + return;
50237 +}
50238 +
50239 +#ifdef CONFIG_COMPAT
50240 +void
50241 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50242 +{
50243 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50244 + char *grarg = gr_exec_arg_buf;
50245 + unsigned int i, x, execlen = 0;
50246 + char c;
50247 +
50248 + if (!((grsec_enable_execlog && grsec_enable_group &&
50249 + in_group_p(grsec_audit_gid))
50250 + || (grsec_enable_execlog && !grsec_enable_group)))
50251 + return;
50252 +
50253 + mutex_lock(&gr_exec_arg_mutex);
50254 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50255 +
50256 + if (unlikely(argv == NULL))
50257 + goto log;
50258 +
50259 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50260 + compat_uptr_t p;
50261 + unsigned int len;
50262 +
50263 + if (get_user(p, argv + i))
50264 + goto log;
50265 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50266 + if (len > 128 - execlen)
50267 + len = 128 - execlen;
50268 + else if (len > 0)
50269 + len--;
50270 + else
50271 + goto log;
50272 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50273 + goto log;
50274 +
50275 + /* rewrite unprintable characters */
50276 + for (x = 0; x < len; x++) {
50277 + c = *(grarg + execlen + x);
50278 + if (c < 32 || c > 126)
50279 + *(grarg + execlen + x) = ' ';
50280 + }
50281 +
50282 + execlen += len;
50283 + *(grarg + execlen) = ' ';
50284 + *(grarg + execlen + 1) = '\0';
50285 + execlen++;
50286 + }
50287 +
50288 + log:
50289 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50290 + bprm->file->f_path.mnt, grarg);
50291 + mutex_unlock(&gr_exec_arg_mutex);
50292 +#endif
50293 + return;
50294 +}
50295 +#endif
50296 diff -urNp linux-2.6.32.41/grsecurity/grsec_fifo.c linux-2.6.32.41/grsecurity/grsec_fifo.c
50297 --- linux-2.6.32.41/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50298 +++ linux-2.6.32.41/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50299 @@ -0,0 +1,24 @@
50300 +#include <linux/kernel.h>
50301 +#include <linux/sched.h>
50302 +#include <linux/fs.h>
50303 +#include <linux/file.h>
50304 +#include <linux/grinternal.h>
50305 +
50306 +int
50307 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50308 + const struct dentry *dir, const int flag, const int acc_mode)
50309 +{
50310 +#ifdef CONFIG_GRKERNSEC_FIFO
50311 + const struct cred *cred = current_cred();
50312 +
50313 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50314 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50315 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50316 + (cred->fsuid != dentry->d_inode->i_uid)) {
50317 + if (!inode_permission(dentry->d_inode, acc_mode))
50318 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50319 + return -EACCES;
50320 + }
50321 +#endif
50322 + return 0;
50323 +}
50324 diff -urNp linux-2.6.32.41/grsecurity/grsec_fork.c linux-2.6.32.41/grsecurity/grsec_fork.c
50325 --- linux-2.6.32.41/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50326 +++ linux-2.6.32.41/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50327 @@ -0,0 +1,23 @@
50328 +#include <linux/kernel.h>
50329 +#include <linux/sched.h>
50330 +#include <linux/grsecurity.h>
50331 +#include <linux/grinternal.h>
50332 +#include <linux/errno.h>
50333 +
50334 +void
50335 +gr_log_forkfail(const int retval)
50336 +{
50337 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50338 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50339 + switch (retval) {
50340 + case -EAGAIN:
50341 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50342 + break;
50343 + case -ENOMEM:
50344 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50345 + break;
50346 + }
50347 + }
50348 +#endif
50349 + return;
50350 +}
50351 diff -urNp linux-2.6.32.41/grsecurity/grsec_init.c linux-2.6.32.41/grsecurity/grsec_init.c
50352 --- linux-2.6.32.41/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50353 +++ linux-2.6.32.41/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50354 @@ -0,0 +1,270 @@
50355 +#include <linux/kernel.h>
50356 +#include <linux/sched.h>
50357 +#include <linux/mm.h>
50358 +#include <linux/smp_lock.h>
50359 +#include <linux/gracl.h>
50360 +#include <linux/slab.h>
50361 +#include <linux/vmalloc.h>
50362 +#include <linux/percpu.h>
50363 +#include <linux/module.h>
50364 +
50365 +int grsec_enable_link;
50366 +int grsec_enable_dmesg;
50367 +int grsec_enable_harden_ptrace;
50368 +int grsec_enable_fifo;
50369 +int grsec_enable_execve;
50370 +int grsec_enable_execlog;
50371 +int grsec_enable_signal;
50372 +int grsec_enable_forkfail;
50373 +int grsec_enable_audit_ptrace;
50374 +int grsec_enable_time;
50375 +int grsec_enable_audit_textrel;
50376 +int grsec_enable_group;
50377 +int grsec_audit_gid;
50378 +int grsec_enable_chdir;
50379 +int grsec_enable_mount;
50380 +int grsec_enable_rofs;
50381 +int grsec_enable_chroot_findtask;
50382 +int grsec_enable_chroot_mount;
50383 +int grsec_enable_chroot_shmat;
50384 +int grsec_enable_chroot_fchdir;
50385 +int grsec_enable_chroot_double;
50386 +int grsec_enable_chroot_pivot;
50387 +int grsec_enable_chroot_chdir;
50388 +int grsec_enable_chroot_chmod;
50389 +int grsec_enable_chroot_mknod;
50390 +int grsec_enable_chroot_nice;
50391 +int grsec_enable_chroot_execlog;
50392 +int grsec_enable_chroot_caps;
50393 +int grsec_enable_chroot_sysctl;
50394 +int grsec_enable_chroot_unix;
50395 +int grsec_enable_tpe;
50396 +int grsec_tpe_gid;
50397 +int grsec_enable_blackhole;
50398 +#ifdef CONFIG_IPV6_MODULE
50399 +EXPORT_SYMBOL(grsec_enable_blackhole);
50400 +#endif
50401 +int grsec_lastack_retries;
50402 +int grsec_enable_tpe_all;
50403 +int grsec_enable_tpe_invert;
50404 +int grsec_enable_socket_all;
50405 +int grsec_socket_all_gid;
50406 +int grsec_enable_socket_client;
50407 +int grsec_socket_client_gid;
50408 +int grsec_enable_socket_server;
50409 +int grsec_socket_server_gid;
50410 +int grsec_resource_logging;
50411 +int grsec_disable_privio;
50412 +int grsec_enable_log_rwxmaps;
50413 +int grsec_lock;
50414 +
50415 +DEFINE_SPINLOCK(grsec_alert_lock);
50416 +unsigned long grsec_alert_wtime = 0;
50417 +unsigned long grsec_alert_fyet = 0;
50418 +
50419 +DEFINE_SPINLOCK(grsec_audit_lock);
50420 +
50421 +DEFINE_RWLOCK(grsec_exec_file_lock);
50422 +
50423 +char *gr_shared_page[4];
50424 +
50425 +char *gr_alert_log_fmt;
50426 +char *gr_audit_log_fmt;
50427 +char *gr_alert_log_buf;
50428 +char *gr_audit_log_buf;
50429 +
50430 +extern struct gr_arg *gr_usermode;
50431 +extern unsigned char *gr_system_salt;
50432 +extern unsigned char *gr_system_sum;
50433 +
50434 +void __init
50435 +grsecurity_init(void)
50436 +{
50437 + int j;
50438 + /* create the per-cpu shared pages */
50439 +
50440 +#ifdef CONFIG_X86
50441 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50442 +#endif
50443 +
50444 + for (j = 0; j < 4; j++) {
50445 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50446 + if (gr_shared_page[j] == NULL) {
50447 + panic("Unable to allocate grsecurity shared page");
50448 + return;
50449 + }
50450 + }
50451 +
50452 + /* allocate log buffers */
50453 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50454 + if (!gr_alert_log_fmt) {
50455 + panic("Unable to allocate grsecurity alert log format buffer");
50456 + return;
50457 + }
50458 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50459 + if (!gr_audit_log_fmt) {
50460 + panic("Unable to allocate grsecurity audit log format buffer");
50461 + return;
50462 + }
50463 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50464 + if (!gr_alert_log_buf) {
50465 + panic("Unable to allocate grsecurity alert log buffer");
50466 + return;
50467 + }
50468 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50469 + if (!gr_audit_log_buf) {
50470 + panic("Unable to allocate grsecurity audit log buffer");
50471 + return;
50472 + }
50473 +
50474 + /* allocate memory for authentication structure */
50475 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50476 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50477 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50478 +
50479 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50480 + panic("Unable to allocate grsecurity authentication structure");
50481 + return;
50482 + }
50483 +
50484 +
50485 +#ifdef CONFIG_GRKERNSEC_IO
50486 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50487 + grsec_disable_privio = 1;
50488 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50489 + grsec_disable_privio = 1;
50490 +#else
50491 + grsec_disable_privio = 0;
50492 +#endif
50493 +#endif
50494 +
50495 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50496 + /* for backward compatibility, tpe_invert always defaults to on if
50497 + enabled in the kernel
50498 + */
50499 + grsec_enable_tpe_invert = 1;
50500 +#endif
50501 +
50502 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50503 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50504 + grsec_lock = 1;
50505 +#endif
50506 +
50507 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50508 + grsec_enable_audit_textrel = 1;
50509 +#endif
50510 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50511 + grsec_enable_log_rwxmaps = 1;
50512 +#endif
50513 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50514 + grsec_enable_group = 1;
50515 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50516 +#endif
50517 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50518 + grsec_enable_chdir = 1;
50519 +#endif
50520 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50521 + grsec_enable_harden_ptrace = 1;
50522 +#endif
50523 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50524 + grsec_enable_mount = 1;
50525 +#endif
50526 +#ifdef CONFIG_GRKERNSEC_LINK
50527 + grsec_enable_link = 1;
50528 +#endif
50529 +#ifdef CONFIG_GRKERNSEC_DMESG
50530 + grsec_enable_dmesg = 1;
50531 +#endif
50532 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50533 + grsec_enable_blackhole = 1;
50534 + grsec_lastack_retries = 4;
50535 +#endif
50536 +#ifdef CONFIG_GRKERNSEC_FIFO
50537 + grsec_enable_fifo = 1;
50538 +#endif
50539 +#ifdef CONFIG_GRKERNSEC_EXECVE
50540 + grsec_enable_execve = 1;
50541 +#endif
50542 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50543 + grsec_enable_execlog = 1;
50544 +#endif
50545 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50546 + grsec_enable_signal = 1;
50547 +#endif
50548 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50549 + grsec_enable_forkfail = 1;
50550 +#endif
50551 +#ifdef CONFIG_GRKERNSEC_TIME
50552 + grsec_enable_time = 1;
50553 +#endif
50554 +#ifdef CONFIG_GRKERNSEC_RESLOG
50555 + grsec_resource_logging = 1;
50556 +#endif
50557 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50558 + grsec_enable_chroot_findtask = 1;
50559 +#endif
50560 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50561 + grsec_enable_chroot_unix = 1;
50562 +#endif
50563 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50564 + grsec_enable_chroot_mount = 1;
50565 +#endif
50566 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50567 + grsec_enable_chroot_fchdir = 1;
50568 +#endif
50569 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50570 + grsec_enable_chroot_shmat = 1;
50571 +#endif
50572 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50573 + grsec_enable_audit_ptrace = 1;
50574 +#endif
50575 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50576 + grsec_enable_chroot_double = 1;
50577 +#endif
50578 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50579 + grsec_enable_chroot_pivot = 1;
50580 +#endif
50581 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50582 + grsec_enable_chroot_chdir = 1;
50583 +#endif
50584 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50585 + grsec_enable_chroot_chmod = 1;
50586 +#endif
50587 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50588 + grsec_enable_chroot_mknod = 1;
50589 +#endif
50590 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50591 + grsec_enable_chroot_nice = 1;
50592 +#endif
50593 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50594 + grsec_enable_chroot_execlog = 1;
50595 +#endif
50596 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50597 + grsec_enable_chroot_caps = 1;
50598 +#endif
50599 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50600 + grsec_enable_chroot_sysctl = 1;
50601 +#endif
50602 +#ifdef CONFIG_GRKERNSEC_TPE
50603 + grsec_enable_tpe = 1;
50604 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50605 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50606 + grsec_enable_tpe_all = 1;
50607 +#endif
50608 +#endif
50609 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50610 + grsec_enable_socket_all = 1;
50611 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50612 +#endif
50613 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50614 + grsec_enable_socket_client = 1;
50615 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50616 +#endif
50617 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50618 + grsec_enable_socket_server = 1;
50619 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50620 +#endif
50621 +#endif
50622 +
50623 + return;
50624 +}
50625 diff -urNp linux-2.6.32.41/grsecurity/grsec_link.c linux-2.6.32.41/grsecurity/grsec_link.c
50626 --- linux-2.6.32.41/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50627 +++ linux-2.6.32.41/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50628 @@ -0,0 +1,43 @@
50629 +#include <linux/kernel.h>
50630 +#include <linux/sched.h>
50631 +#include <linux/fs.h>
50632 +#include <linux/file.h>
50633 +#include <linux/grinternal.h>
50634 +
50635 +int
50636 +gr_handle_follow_link(const struct inode *parent,
50637 + const struct inode *inode,
50638 + const struct dentry *dentry, const struct vfsmount *mnt)
50639 +{
50640 +#ifdef CONFIG_GRKERNSEC_LINK
50641 + const struct cred *cred = current_cred();
50642 +
50643 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50644 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50645 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50646 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50647 + return -EACCES;
50648 + }
50649 +#endif
50650 + return 0;
50651 +}
50652 +
50653 +int
50654 +gr_handle_hardlink(const struct dentry *dentry,
50655 + const struct vfsmount *mnt,
50656 + struct inode *inode, const int mode, const char *to)
50657 +{
50658 +#ifdef CONFIG_GRKERNSEC_LINK
50659 + const struct cred *cred = current_cred();
50660 +
50661 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50662 + (!S_ISREG(mode) || (mode & S_ISUID) ||
50663 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50664 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50665 + !capable(CAP_FOWNER) && cred->uid) {
50666 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50667 + return -EPERM;
50668 + }
50669 +#endif
50670 + return 0;
50671 +}
50672 diff -urNp linux-2.6.32.41/grsecurity/grsec_log.c linux-2.6.32.41/grsecurity/grsec_log.c
50673 --- linux-2.6.32.41/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50674 +++ linux-2.6.32.41/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50675 @@ -0,0 +1,310 @@
50676 +#include <linux/kernel.h>
50677 +#include <linux/sched.h>
50678 +#include <linux/file.h>
50679 +#include <linux/tty.h>
50680 +#include <linux/fs.h>
50681 +#include <linux/grinternal.h>
50682 +
50683 +#ifdef CONFIG_TREE_PREEMPT_RCU
50684 +#define DISABLE_PREEMPT() preempt_disable()
50685 +#define ENABLE_PREEMPT() preempt_enable()
50686 +#else
50687 +#define DISABLE_PREEMPT()
50688 +#define ENABLE_PREEMPT()
50689 +#endif
50690 +
50691 +#define BEGIN_LOCKS(x) \
50692 + DISABLE_PREEMPT(); \
50693 + rcu_read_lock(); \
50694 + read_lock(&tasklist_lock); \
50695 + read_lock(&grsec_exec_file_lock); \
50696 + if (x != GR_DO_AUDIT) \
50697 + spin_lock(&grsec_alert_lock); \
50698 + else \
50699 + spin_lock(&grsec_audit_lock)
50700 +
50701 +#define END_LOCKS(x) \
50702 + if (x != GR_DO_AUDIT) \
50703 + spin_unlock(&grsec_alert_lock); \
50704 + else \
50705 + spin_unlock(&grsec_audit_lock); \
50706 + read_unlock(&grsec_exec_file_lock); \
50707 + read_unlock(&tasklist_lock); \
50708 + rcu_read_unlock(); \
50709 + ENABLE_PREEMPT(); \
50710 + if (x == GR_DONT_AUDIT) \
50711 + gr_handle_alertkill(current)
50712 +
50713 +enum {
50714 + FLOODING,
50715 + NO_FLOODING
50716 +};
50717 +
50718 +extern char *gr_alert_log_fmt;
50719 +extern char *gr_audit_log_fmt;
50720 +extern char *gr_alert_log_buf;
50721 +extern char *gr_audit_log_buf;
50722 +
50723 +static int gr_log_start(int audit)
50724 +{
50725 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50726 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50727 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50728 +
50729 + if (audit == GR_DO_AUDIT)
50730 + goto set_fmt;
50731 +
50732 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50733 + grsec_alert_wtime = jiffies;
50734 + grsec_alert_fyet = 0;
50735 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50736 + grsec_alert_fyet++;
50737 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50738 + grsec_alert_wtime = jiffies;
50739 + grsec_alert_fyet++;
50740 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50741 + return FLOODING;
50742 + } else return FLOODING;
50743 +
50744 +set_fmt:
50745 + memset(buf, 0, PAGE_SIZE);
50746 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
50747 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50748 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50749 + } else if (current->signal->curr_ip) {
50750 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50751 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50752 + } else if (gr_acl_is_enabled()) {
50753 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50754 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50755 + } else {
50756 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
50757 + strcpy(buf, fmt);
50758 + }
50759 +
50760 + return NO_FLOODING;
50761 +}
50762 +
50763 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50764 + __attribute__ ((format (printf, 2, 0)));
50765 +
50766 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50767 +{
50768 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50769 + unsigned int len = strlen(buf);
50770 +
50771 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50772 +
50773 + return;
50774 +}
50775 +
50776 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50777 + __attribute__ ((format (printf, 2, 3)));
50778 +
50779 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50780 +{
50781 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50782 + unsigned int len = strlen(buf);
50783 + va_list ap;
50784 +
50785 + va_start(ap, msg);
50786 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50787 + va_end(ap);
50788 +
50789 + return;
50790 +}
50791 +
50792 +static void gr_log_end(int audit)
50793 +{
50794 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50795 + unsigned int len = strlen(buf);
50796 +
50797 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
50798 + printk("%s\n", buf);
50799 +
50800 + return;
50801 +}
50802 +
50803 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
50804 +{
50805 + int logtype;
50806 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
50807 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
50808 + void *voidptr = NULL;
50809 + int num1 = 0, num2 = 0;
50810 + unsigned long ulong1 = 0, ulong2 = 0;
50811 + struct dentry *dentry = NULL;
50812 + struct vfsmount *mnt = NULL;
50813 + struct file *file = NULL;
50814 + struct task_struct *task = NULL;
50815 + const struct cred *cred, *pcred;
50816 + va_list ap;
50817 +
50818 + BEGIN_LOCKS(audit);
50819 + logtype = gr_log_start(audit);
50820 + if (logtype == FLOODING) {
50821 + END_LOCKS(audit);
50822 + return;
50823 + }
50824 + va_start(ap, argtypes);
50825 + switch (argtypes) {
50826 + case GR_TTYSNIFF:
50827 + task = va_arg(ap, struct task_struct *);
50828 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
50829 + break;
50830 + case GR_SYSCTL_HIDDEN:
50831 + str1 = va_arg(ap, char *);
50832 + gr_log_middle_varargs(audit, msg, result, str1);
50833 + break;
50834 + case GR_RBAC:
50835 + dentry = va_arg(ap, struct dentry *);
50836 + mnt = va_arg(ap, struct vfsmount *);
50837 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
50838 + break;
50839 + case GR_RBAC_STR:
50840 + dentry = va_arg(ap, struct dentry *);
50841 + mnt = va_arg(ap, struct vfsmount *);
50842 + str1 = va_arg(ap, char *);
50843 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
50844 + break;
50845 + case GR_STR_RBAC:
50846 + str1 = va_arg(ap, char *);
50847 + dentry = va_arg(ap, struct dentry *);
50848 + mnt = va_arg(ap, struct vfsmount *);
50849 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
50850 + break;
50851 + case GR_RBAC_MODE2:
50852 + dentry = va_arg(ap, struct dentry *);
50853 + mnt = va_arg(ap, struct vfsmount *);
50854 + str1 = va_arg(ap, char *);
50855 + str2 = va_arg(ap, char *);
50856 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
50857 + break;
50858 + case GR_RBAC_MODE3:
50859 + dentry = va_arg(ap, struct dentry *);
50860 + mnt = va_arg(ap, struct vfsmount *);
50861 + str1 = va_arg(ap, char *);
50862 + str2 = va_arg(ap, char *);
50863 + str3 = va_arg(ap, char *);
50864 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
50865 + break;
50866 + case GR_FILENAME:
50867 + dentry = va_arg(ap, struct dentry *);
50868 + mnt = va_arg(ap, struct vfsmount *);
50869 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
50870 + break;
50871 + case GR_STR_FILENAME:
50872 + str1 = va_arg(ap, char *);
50873 + dentry = va_arg(ap, struct dentry *);
50874 + mnt = va_arg(ap, struct vfsmount *);
50875 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
50876 + break;
50877 + case GR_FILENAME_STR:
50878 + dentry = va_arg(ap, struct dentry *);
50879 + mnt = va_arg(ap, struct vfsmount *);
50880 + str1 = va_arg(ap, char *);
50881 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
50882 + break;
50883 + case GR_FILENAME_TWO_INT:
50884 + dentry = va_arg(ap, struct dentry *);
50885 + mnt = va_arg(ap, struct vfsmount *);
50886 + num1 = va_arg(ap, int);
50887 + num2 = va_arg(ap, int);
50888 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
50889 + break;
50890 + case GR_FILENAME_TWO_INT_STR:
50891 + dentry = va_arg(ap, struct dentry *);
50892 + mnt = va_arg(ap, struct vfsmount *);
50893 + num1 = va_arg(ap, int);
50894 + num2 = va_arg(ap, int);
50895 + str1 = va_arg(ap, char *);
50896 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
50897 + break;
50898 + case GR_TEXTREL:
50899 + file = va_arg(ap, struct file *);
50900 + ulong1 = va_arg(ap, unsigned long);
50901 + ulong2 = va_arg(ap, unsigned long);
50902 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
50903 + break;
50904 + case GR_PTRACE:
50905 + task = va_arg(ap, struct task_struct *);
50906 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
50907 + break;
50908 + case GR_RESOURCE:
50909 + task = va_arg(ap, struct task_struct *);
50910 + cred = __task_cred(task);
50911 + pcred = __task_cred(task->real_parent);
50912 + ulong1 = va_arg(ap, unsigned long);
50913 + str1 = va_arg(ap, char *);
50914 + ulong2 = va_arg(ap, unsigned long);
50915 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50916 + break;
50917 + case GR_CAP:
50918 + task = va_arg(ap, struct task_struct *);
50919 + cred = __task_cred(task);
50920 + pcred = __task_cred(task->real_parent);
50921 + str1 = va_arg(ap, char *);
50922 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50923 + break;
50924 + case GR_SIG:
50925 + str1 = va_arg(ap, char *);
50926 + voidptr = va_arg(ap, void *);
50927 + gr_log_middle_varargs(audit, msg, str1, voidptr);
50928 + break;
50929 + case GR_SIG2:
50930 + task = va_arg(ap, struct task_struct *);
50931 + cred = __task_cred(task);
50932 + pcred = __task_cred(task->real_parent);
50933 + num1 = va_arg(ap, int);
50934 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50935 + break;
50936 + case GR_CRASH1:
50937 + task = va_arg(ap, struct task_struct *);
50938 + cred = __task_cred(task);
50939 + pcred = __task_cred(task->real_parent);
50940 + ulong1 = va_arg(ap, unsigned long);
50941 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
50942 + break;
50943 + case GR_CRASH2:
50944 + task = va_arg(ap, struct task_struct *);
50945 + cred = __task_cred(task);
50946 + pcred = __task_cred(task->real_parent);
50947 + ulong1 = va_arg(ap, unsigned long);
50948 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
50949 + break;
50950 + case GR_RWXMAP:
50951 + file = va_arg(ap, struct file *);
50952 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
50953 + break;
50954 + case GR_PSACCT:
50955 + {
50956 + unsigned int wday, cday;
50957 + __u8 whr, chr;
50958 + __u8 wmin, cmin;
50959 + __u8 wsec, csec;
50960 + char cur_tty[64] = { 0 };
50961 + char parent_tty[64] = { 0 };
50962 +
50963 + task = va_arg(ap, struct task_struct *);
50964 + wday = va_arg(ap, unsigned int);
50965 + cday = va_arg(ap, unsigned int);
50966 + whr = va_arg(ap, int);
50967 + chr = va_arg(ap, int);
50968 + wmin = va_arg(ap, int);
50969 + cmin = va_arg(ap, int);
50970 + wsec = va_arg(ap, int);
50971 + csec = va_arg(ap, int);
50972 + ulong1 = va_arg(ap, unsigned long);
50973 + cred = __task_cred(task);
50974 + pcred = __task_cred(task->real_parent);
50975 +
50976 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50977 + }
50978 + break;
50979 + default:
50980 + gr_log_middle(audit, msg, ap);
50981 + }
50982 + va_end(ap);
50983 + gr_log_end(audit);
50984 + END_LOCKS(audit);
50985 +}
50986 diff -urNp linux-2.6.32.41/grsecurity/grsec_mem.c linux-2.6.32.41/grsecurity/grsec_mem.c
50987 --- linux-2.6.32.41/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
50988 +++ linux-2.6.32.41/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
50989 @@ -0,0 +1,33 @@
50990 +#include <linux/kernel.h>
50991 +#include <linux/sched.h>
50992 +#include <linux/mm.h>
50993 +#include <linux/mman.h>
50994 +#include <linux/grinternal.h>
50995 +
50996 +void
50997 +gr_handle_ioperm(void)
50998 +{
50999 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51000 + return;
51001 +}
51002 +
51003 +void
51004 +gr_handle_iopl(void)
51005 +{
51006 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51007 + return;
51008 +}
51009 +
51010 +void
51011 +gr_handle_mem_readwrite(u64 from, u64 to)
51012 +{
51013 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51014 + return;
51015 +}
51016 +
51017 +void
51018 +gr_handle_vm86(void)
51019 +{
51020 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51021 + return;
51022 +}
51023 diff -urNp linux-2.6.32.41/grsecurity/grsec_mount.c linux-2.6.32.41/grsecurity/grsec_mount.c
51024 --- linux-2.6.32.41/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51025 +++ linux-2.6.32.41/grsecurity/grsec_mount.c 2011-04-17 15:56:46.000000000 -0400
51026 @@ -0,0 +1,62 @@
51027 +#include <linux/kernel.h>
51028 +#include <linux/sched.h>
51029 +#include <linux/mount.h>
51030 +#include <linux/grsecurity.h>
51031 +#include <linux/grinternal.h>
51032 +
51033 +void
51034 +gr_log_remount(const char *devname, const int retval)
51035 +{
51036 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51037 + if (grsec_enable_mount && (retval >= 0))
51038 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51039 +#endif
51040 + return;
51041 +}
51042 +
51043 +void
51044 +gr_log_unmount(const char *devname, const int retval)
51045 +{
51046 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51047 + if (grsec_enable_mount && (retval >= 0))
51048 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51049 +#endif
51050 + return;
51051 +}
51052 +
51053 +void
51054 +gr_log_mount(const char *from, const char *to, const int retval)
51055 +{
51056 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51057 + if (grsec_enable_mount && (retval >= 0))
51058 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
51059 +#endif
51060 + return;
51061 +}
51062 +
51063 +int
51064 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51065 +{
51066 +#ifdef CONFIG_GRKERNSEC_ROFS
51067 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51068 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51069 + return -EPERM;
51070 + } else
51071 + return 0;
51072 +#endif
51073 + return 0;
51074 +}
51075 +
51076 +int
51077 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51078 +{
51079 +#ifdef CONFIG_GRKERNSEC_ROFS
51080 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51081 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51082 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51083 + return -EPERM;
51084 + } else
51085 + return 0;
51086 +#endif
51087 + return 0;
51088 +}
51089 diff -urNp linux-2.6.32.41/grsecurity/grsec_pax.c linux-2.6.32.41/grsecurity/grsec_pax.c
51090 --- linux-2.6.32.41/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51091 +++ linux-2.6.32.41/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51092 @@ -0,0 +1,36 @@
51093 +#include <linux/kernel.h>
51094 +#include <linux/sched.h>
51095 +#include <linux/mm.h>
51096 +#include <linux/file.h>
51097 +#include <linux/grinternal.h>
51098 +#include <linux/grsecurity.h>
51099 +
51100 +void
51101 +gr_log_textrel(struct vm_area_struct * vma)
51102 +{
51103 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51104 + if (grsec_enable_audit_textrel)
51105 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51106 +#endif
51107 + return;
51108 +}
51109 +
51110 +void
51111 +gr_log_rwxmmap(struct file *file)
51112 +{
51113 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51114 + if (grsec_enable_log_rwxmaps)
51115 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51116 +#endif
51117 + return;
51118 +}
51119 +
51120 +void
51121 +gr_log_rwxmprotect(struct file *file)
51122 +{
51123 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51124 + if (grsec_enable_log_rwxmaps)
51125 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51126 +#endif
51127 + return;
51128 +}
51129 diff -urNp linux-2.6.32.41/grsecurity/grsec_ptrace.c linux-2.6.32.41/grsecurity/grsec_ptrace.c
51130 --- linux-2.6.32.41/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51131 +++ linux-2.6.32.41/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51132 @@ -0,0 +1,14 @@
51133 +#include <linux/kernel.h>
51134 +#include <linux/sched.h>
51135 +#include <linux/grinternal.h>
51136 +#include <linux/grsecurity.h>
51137 +
51138 +void
51139 +gr_audit_ptrace(struct task_struct *task)
51140 +{
51141 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51142 + if (grsec_enable_audit_ptrace)
51143 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51144 +#endif
51145 + return;
51146 +}
51147 diff -urNp linux-2.6.32.41/grsecurity/grsec_sig.c linux-2.6.32.41/grsecurity/grsec_sig.c
51148 --- linux-2.6.32.41/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51149 +++ linux-2.6.32.41/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51150 @@ -0,0 +1,202 @@
51151 +#include <linux/kernel.h>
51152 +#include <linux/sched.h>
51153 +#include <linux/delay.h>
51154 +#include <linux/grsecurity.h>
51155 +#include <linux/grinternal.h>
51156 +#include <linux/hardirq.h>
51157 +
51158 +char *signames[] = {
51159 + [SIGSEGV] = "Segmentation fault",
51160 + [SIGILL] = "Illegal instruction",
51161 + [SIGABRT] = "Abort",
51162 + [SIGBUS] = "Invalid alignment/Bus error"
51163 +};
51164 +
51165 +void
51166 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51167 +{
51168 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51169 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51170 + (sig == SIGABRT) || (sig == SIGBUS))) {
51171 + if (t->pid == current->pid) {
51172 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51173 + } else {
51174 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51175 + }
51176 + }
51177 +#endif
51178 + return;
51179 +}
51180 +
51181 +int
51182 +gr_handle_signal(const struct task_struct *p, const int sig)
51183 +{
51184 +#ifdef CONFIG_GRKERNSEC
51185 + if (current->pid > 1 && gr_check_protected_task(p)) {
51186 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51187 + return -EPERM;
51188 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51189 + return -EPERM;
51190 + }
51191 +#endif
51192 + return 0;
51193 +}
51194 +
51195 +#ifdef CONFIG_GRKERNSEC
51196 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51197 +
51198 +int gr_fake_force_sig(int sig, struct task_struct *t)
51199 +{
51200 + unsigned long int flags;
51201 + int ret, blocked, ignored;
51202 + struct k_sigaction *action;
51203 +
51204 + spin_lock_irqsave(&t->sighand->siglock, flags);
51205 + action = &t->sighand->action[sig-1];
51206 + ignored = action->sa.sa_handler == SIG_IGN;
51207 + blocked = sigismember(&t->blocked, sig);
51208 + if (blocked || ignored) {
51209 + action->sa.sa_handler = SIG_DFL;
51210 + if (blocked) {
51211 + sigdelset(&t->blocked, sig);
51212 + recalc_sigpending_and_wake(t);
51213 + }
51214 + }
51215 + if (action->sa.sa_handler == SIG_DFL)
51216 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51217 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51218 +
51219 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51220 +
51221 + return ret;
51222 +}
51223 +#endif
51224 +
51225 +#ifdef CONFIG_GRKERNSEC_BRUTE
51226 +#define GR_USER_BAN_TIME (15 * 60)
51227 +
51228 +static int __get_dumpable(unsigned long mm_flags)
51229 +{
51230 + int ret;
51231 +
51232 + ret = mm_flags & MMF_DUMPABLE_MASK;
51233 + return (ret >= 2) ? 2 : ret;
51234 +}
51235 +#endif
51236 +
51237 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51238 +{
51239 +#ifdef CONFIG_GRKERNSEC_BRUTE
51240 + uid_t uid = 0;
51241 +
51242 + rcu_read_lock();
51243 + read_lock(&tasklist_lock);
51244 + read_lock(&grsec_exec_file_lock);
51245 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51246 + p->real_parent->brute = 1;
51247 + else {
51248 + const struct cred *cred = __task_cred(p), *cred2;
51249 + struct task_struct *tsk, *tsk2;
51250 +
51251 + if (!__get_dumpable(mm_flags) && cred->uid) {
51252 + struct user_struct *user;
51253 +
51254 + uid = cred->uid;
51255 +
51256 + /* this is put upon execution past expiration */
51257 + user = find_user(uid);
51258 + if (user == NULL)
51259 + goto unlock;
51260 + user->banned = 1;
51261 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51262 + if (user->ban_expires == ~0UL)
51263 + user->ban_expires--;
51264 +
51265 + do_each_thread(tsk2, tsk) {
51266 + cred2 = __task_cred(tsk);
51267 + if (tsk != p && cred2->uid == uid)
51268 + gr_fake_force_sig(SIGKILL, tsk);
51269 + } while_each_thread(tsk2, tsk);
51270 + }
51271 + }
51272 +unlock:
51273 + read_unlock(&grsec_exec_file_lock);
51274 + read_unlock(&tasklist_lock);
51275 + rcu_read_unlock();
51276 +
51277 + if (uid)
51278 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51279 +#endif
51280 + return;
51281 +}
51282 +
51283 +void gr_handle_brute_check(void)
51284 +{
51285 +#ifdef CONFIG_GRKERNSEC_BRUTE
51286 + if (current->brute)
51287 + msleep(30 * 1000);
51288 +#endif
51289 + return;
51290 +}
51291 +
51292 +void gr_handle_kernel_exploit(void)
51293 +{
51294 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51295 + const struct cred *cred;
51296 + struct task_struct *tsk, *tsk2;
51297 + struct user_struct *user;
51298 + uid_t uid;
51299 +
51300 + if (in_irq() || in_serving_softirq() || in_nmi())
51301 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51302 +
51303 + uid = current_uid();
51304 +
51305 + if (uid == 0)
51306 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51307 + else {
51308 + /* kill all the processes of this user, hold a reference
51309 + to their creds struct, and prevent them from creating
51310 + another process until system reset
51311 + */
51312 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51313 + /* we intentionally leak this ref */
51314 + user = get_uid(current->cred->user);
51315 + if (user) {
51316 + user->banned = 1;
51317 + user->ban_expires = ~0UL;
51318 + }
51319 +
51320 + read_lock(&tasklist_lock);
51321 + do_each_thread(tsk2, tsk) {
51322 + cred = __task_cred(tsk);
51323 + if (cred->uid == uid)
51324 + gr_fake_force_sig(SIGKILL, tsk);
51325 + } while_each_thread(tsk2, tsk);
51326 + read_unlock(&tasklist_lock);
51327 + }
51328 +#endif
51329 +}
51330 +
51331 +int __gr_process_user_ban(struct user_struct *user)
51332 +{
51333 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51334 + if (unlikely(user->banned)) {
51335 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51336 + user->banned = 0;
51337 + user->ban_expires = 0;
51338 + free_uid(user);
51339 + } else
51340 + return -EPERM;
51341 + }
51342 +#endif
51343 + return 0;
51344 +}
51345 +
51346 +int gr_process_user_ban(void)
51347 +{
51348 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51349 + return __gr_process_user_ban(current->cred->user);
51350 +#endif
51351 + return 0;
51352 +}
51353 diff -urNp linux-2.6.32.41/grsecurity/grsec_sock.c linux-2.6.32.41/grsecurity/grsec_sock.c
51354 --- linux-2.6.32.41/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51355 +++ linux-2.6.32.41/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51356 @@ -0,0 +1,275 @@
51357 +#include <linux/kernel.h>
51358 +#include <linux/module.h>
51359 +#include <linux/sched.h>
51360 +#include <linux/file.h>
51361 +#include <linux/net.h>
51362 +#include <linux/in.h>
51363 +#include <linux/ip.h>
51364 +#include <net/sock.h>
51365 +#include <net/inet_sock.h>
51366 +#include <linux/grsecurity.h>
51367 +#include <linux/grinternal.h>
51368 +#include <linux/gracl.h>
51369 +
51370 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51371 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51372 +
51373 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51374 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51375 +
51376 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51377 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51378 +
51379 +#ifdef CONFIG_UNIX_MODULE
51380 +EXPORT_SYMBOL(gr_acl_handle_unix);
51381 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51382 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51383 +EXPORT_SYMBOL(gr_handle_create);
51384 +#endif
51385 +
51386 +#ifdef CONFIG_GRKERNSEC
51387 +#define gr_conn_table_size 32749
51388 +struct conn_table_entry {
51389 + struct conn_table_entry *next;
51390 + struct signal_struct *sig;
51391 +};
51392 +
51393 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51394 +DEFINE_SPINLOCK(gr_conn_table_lock);
51395 +
51396 +extern const char * gr_socktype_to_name(unsigned char type);
51397 +extern const char * gr_proto_to_name(unsigned char proto);
51398 +extern const char * gr_sockfamily_to_name(unsigned char family);
51399 +
51400 +static __inline__ int
51401 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51402 +{
51403 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51404 +}
51405 +
51406 +static __inline__ int
51407 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51408 + __u16 sport, __u16 dport)
51409 +{
51410 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51411 + sig->gr_sport == sport && sig->gr_dport == dport))
51412 + return 1;
51413 + else
51414 + return 0;
51415 +}
51416 +
51417 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51418 +{
51419 + struct conn_table_entry **match;
51420 + unsigned int index;
51421 +
51422 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51423 + sig->gr_sport, sig->gr_dport,
51424 + gr_conn_table_size);
51425 +
51426 + newent->sig = sig;
51427 +
51428 + match = &gr_conn_table[index];
51429 + newent->next = *match;
51430 + *match = newent;
51431 +
51432 + return;
51433 +}
51434 +
51435 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51436 +{
51437 + struct conn_table_entry *match, *last = NULL;
51438 + unsigned int index;
51439 +
51440 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51441 + sig->gr_sport, sig->gr_dport,
51442 + gr_conn_table_size);
51443 +
51444 + match = gr_conn_table[index];
51445 + while (match && !conn_match(match->sig,
51446 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51447 + sig->gr_dport)) {
51448 + last = match;
51449 + match = match->next;
51450 + }
51451 +
51452 + if (match) {
51453 + if (last)
51454 + last->next = match->next;
51455 + else
51456 + gr_conn_table[index] = NULL;
51457 + kfree(match);
51458 + }
51459 +
51460 + return;
51461 +}
51462 +
51463 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51464 + __u16 sport, __u16 dport)
51465 +{
51466 + struct conn_table_entry *match;
51467 + unsigned int index;
51468 +
51469 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51470 +
51471 + match = gr_conn_table[index];
51472 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51473 + match = match->next;
51474 +
51475 + if (match)
51476 + return match->sig;
51477 + else
51478 + return NULL;
51479 +}
51480 +
51481 +#endif
51482 +
51483 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51484 +{
51485 +#ifdef CONFIG_GRKERNSEC
51486 + struct signal_struct *sig = task->signal;
51487 + struct conn_table_entry *newent;
51488 +
51489 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51490 + if (newent == NULL)
51491 + return;
51492 + /* no bh lock needed since we are called with bh disabled */
51493 + spin_lock(&gr_conn_table_lock);
51494 + gr_del_task_from_ip_table_nolock(sig);
51495 + sig->gr_saddr = inet->rcv_saddr;
51496 + sig->gr_daddr = inet->daddr;
51497 + sig->gr_sport = inet->sport;
51498 + sig->gr_dport = inet->dport;
51499 + gr_add_to_task_ip_table_nolock(sig, newent);
51500 + spin_unlock(&gr_conn_table_lock);
51501 +#endif
51502 + return;
51503 +}
51504 +
51505 +void gr_del_task_from_ip_table(struct task_struct *task)
51506 +{
51507 +#ifdef CONFIG_GRKERNSEC
51508 + spin_lock_bh(&gr_conn_table_lock);
51509 + gr_del_task_from_ip_table_nolock(task->signal);
51510 + spin_unlock_bh(&gr_conn_table_lock);
51511 +#endif
51512 + return;
51513 +}
51514 +
51515 +void
51516 +gr_attach_curr_ip(const struct sock *sk)
51517 +{
51518 +#ifdef CONFIG_GRKERNSEC
51519 + struct signal_struct *p, *set;
51520 + const struct inet_sock *inet = inet_sk(sk);
51521 +
51522 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51523 + return;
51524 +
51525 + set = current->signal;
51526 +
51527 + spin_lock_bh(&gr_conn_table_lock);
51528 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51529 + inet->dport, inet->sport);
51530 + if (unlikely(p != NULL)) {
51531 + set->curr_ip = p->curr_ip;
51532 + set->used_accept = 1;
51533 + gr_del_task_from_ip_table_nolock(p);
51534 + spin_unlock_bh(&gr_conn_table_lock);
51535 + return;
51536 + }
51537 + spin_unlock_bh(&gr_conn_table_lock);
51538 +
51539 + set->curr_ip = inet->daddr;
51540 + set->used_accept = 1;
51541 +#endif
51542 + return;
51543 +}
51544 +
51545 +int
51546 +gr_handle_sock_all(const int family, const int type, const int protocol)
51547 +{
51548 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51549 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51550 + (family != AF_UNIX)) {
51551 + if (family == AF_INET)
51552 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51553 + else
51554 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51555 + return -EACCES;
51556 + }
51557 +#endif
51558 + return 0;
51559 +}
51560 +
51561 +int
51562 +gr_handle_sock_server(const struct sockaddr *sck)
51563 +{
51564 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51565 + if (grsec_enable_socket_server &&
51566 + in_group_p(grsec_socket_server_gid) &&
51567 + sck && (sck->sa_family != AF_UNIX) &&
51568 + (sck->sa_family != AF_LOCAL)) {
51569 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51570 + return -EACCES;
51571 + }
51572 +#endif
51573 + return 0;
51574 +}
51575 +
51576 +int
51577 +gr_handle_sock_server_other(const struct sock *sck)
51578 +{
51579 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51580 + if (grsec_enable_socket_server &&
51581 + in_group_p(grsec_socket_server_gid) &&
51582 + sck && (sck->sk_family != AF_UNIX) &&
51583 + (sck->sk_family != AF_LOCAL)) {
51584 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51585 + return -EACCES;
51586 + }
51587 +#endif
51588 + return 0;
51589 +}
51590 +
51591 +int
51592 +gr_handle_sock_client(const struct sockaddr *sck)
51593 +{
51594 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51595 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51596 + sck && (sck->sa_family != AF_UNIX) &&
51597 + (sck->sa_family != AF_LOCAL)) {
51598 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51599 + return -EACCES;
51600 + }
51601 +#endif
51602 + return 0;
51603 +}
51604 +
51605 +kernel_cap_t
51606 +gr_cap_rtnetlink(struct sock *sock)
51607 +{
51608 +#ifdef CONFIG_GRKERNSEC
51609 + if (!gr_acl_is_enabled())
51610 + return current_cap();
51611 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51612 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51613 + gr_is_capable(CAP_SYS_ADMIN))
51614 + return current_cap();
51615 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51616 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51617 + gr_is_capable(CAP_AUDIT_WRITE) &&
51618 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51619 + gr_is_capable(CAP_AUDIT_CONTROL))
51620 + return current_cap();
51621 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51622 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51623 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51624 + gr_is_capable(CAP_NET_ADMIN)))
51625 + return current_cap();
51626 + else
51627 + return __cap_empty_set;
51628 +#else
51629 + return current_cap();
51630 +#endif
51631 +}
51632 diff -urNp linux-2.6.32.41/grsecurity/grsec_sysctl.c linux-2.6.32.41/grsecurity/grsec_sysctl.c
51633 --- linux-2.6.32.41/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51634 +++ linux-2.6.32.41/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51635 @@ -0,0 +1,479 @@
51636 +#include <linux/kernel.h>
51637 +#include <linux/sched.h>
51638 +#include <linux/sysctl.h>
51639 +#include <linux/grsecurity.h>
51640 +#include <linux/grinternal.h>
51641 +
51642 +int
51643 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51644 +{
51645 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51646 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51647 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51648 + return -EACCES;
51649 + }
51650 +#endif
51651 + return 0;
51652 +}
51653 +
51654 +#ifdef CONFIG_GRKERNSEC_ROFS
51655 +static int __maybe_unused one = 1;
51656 +#endif
51657 +
51658 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51659 +ctl_table grsecurity_table[] = {
51660 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51661 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51662 +#ifdef CONFIG_GRKERNSEC_IO
51663 + {
51664 + .ctl_name = CTL_UNNUMBERED,
51665 + .procname = "disable_priv_io",
51666 + .data = &grsec_disable_privio,
51667 + .maxlen = sizeof(int),
51668 + .mode = 0600,
51669 + .proc_handler = &proc_dointvec,
51670 + },
51671 +#endif
51672 +#endif
51673 +#ifdef CONFIG_GRKERNSEC_LINK
51674 + {
51675 + .ctl_name = CTL_UNNUMBERED,
51676 + .procname = "linking_restrictions",
51677 + .data = &grsec_enable_link,
51678 + .maxlen = sizeof(int),
51679 + .mode = 0600,
51680 + .proc_handler = &proc_dointvec,
51681 + },
51682 +#endif
51683 +#ifdef CONFIG_GRKERNSEC_FIFO
51684 + {
51685 + .ctl_name = CTL_UNNUMBERED,
51686 + .procname = "fifo_restrictions",
51687 + .data = &grsec_enable_fifo,
51688 + .maxlen = sizeof(int),
51689 + .mode = 0600,
51690 + .proc_handler = &proc_dointvec,
51691 + },
51692 +#endif
51693 +#ifdef CONFIG_GRKERNSEC_EXECVE
51694 + {
51695 + .ctl_name = CTL_UNNUMBERED,
51696 + .procname = "execve_limiting",
51697 + .data = &grsec_enable_execve,
51698 + .maxlen = sizeof(int),
51699 + .mode = 0600,
51700 + .proc_handler = &proc_dointvec,
51701 + },
51702 +#endif
51703 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51704 + {
51705 + .ctl_name = CTL_UNNUMBERED,
51706 + .procname = "ip_blackhole",
51707 + .data = &grsec_enable_blackhole,
51708 + .maxlen = sizeof(int),
51709 + .mode = 0600,
51710 + .proc_handler = &proc_dointvec,
51711 + },
51712 + {
51713 + .ctl_name = CTL_UNNUMBERED,
51714 + .procname = "lastack_retries",
51715 + .data = &grsec_lastack_retries,
51716 + .maxlen = sizeof(int),
51717 + .mode = 0600,
51718 + .proc_handler = &proc_dointvec,
51719 + },
51720 +#endif
51721 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51722 + {
51723 + .ctl_name = CTL_UNNUMBERED,
51724 + .procname = "exec_logging",
51725 + .data = &grsec_enable_execlog,
51726 + .maxlen = sizeof(int),
51727 + .mode = 0600,
51728 + .proc_handler = &proc_dointvec,
51729 + },
51730 +#endif
51731 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51732 + {
51733 + .ctl_name = CTL_UNNUMBERED,
51734 + .procname = "rwxmap_logging",
51735 + .data = &grsec_enable_log_rwxmaps,
51736 + .maxlen = sizeof(int),
51737 + .mode = 0600,
51738 + .proc_handler = &proc_dointvec,
51739 + },
51740 +#endif
51741 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51742 + {
51743 + .ctl_name = CTL_UNNUMBERED,
51744 + .procname = "signal_logging",
51745 + .data = &grsec_enable_signal,
51746 + .maxlen = sizeof(int),
51747 + .mode = 0600,
51748 + .proc_handler = &proc_dointvec,
51749 + },
51750 +#endif
51751 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51752 + {
51753 + .ctl_name = CTL_UNNUMBERED,
51754 + .procname = "forkfail_logging",
51755 + .data = &grsec_enable_forkfail,
51756 + .maxlen = sizeof(int),
51757 + .mode = 0600,
51758 + .proc_handler = &proc_dointvec,
51759 + },
51760 +#endif
51761 +#ifdef CONFIG_GRKERNSEC_TIME
51762 + {
51763 + .ctl_name = CTL_UNNUMBERED,
51764 + .procname = "timechange_logging",
51765 + .data = &grsec_enable_time,
51766 + .maxlen = sizeof(int),
51767 + .mode = 0600,
51768 + .proc_handler = &proc_dointvec,
51769 + },
51770 +#endif
51771 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51772 + {
51773 + .ctl_name = CTL_UNNUMBERED,
51774 + .procname = "chroot_deny_shmat",
51775 + .data = &grsec_enable_chroot_shmat,
51776 + .maxlen = sizeof(int),
51777 + .mode = 0600,
51778 + .proc_handler = &proc_dointvec,
51779 + },
51780 +#endif
51781 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51782 + {
51783 + .ctl_name = CTL_UNNUMBERED,
51784 + .procname = "chroot_deny_unix",
51785 + .data = &grsec_enable_chroot_unix,
51786 + .maxlen = sizeof(int),
51787 + .mode = 0600,
51788 + .proc_handler = &proc_dointvec,
51789 + },
51790 +#endif
51791 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51792 + {
51793 + .ctl_name = CTL_UNNUMBERED,
51794 + .procname = "chroot_deny_mount",
51795 + .data = &grsec_enable_chroot_mount,
51796 + .maxlen = sizeof(int),
51797 + .mode = 0600,
51798 + .proc_handler = &proc_dointvec,
51799 + },
51800 +#endif
51801 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51802 + {
51803 + .ctl_name = CTL_UNNUMBERED,
51804 + .procname = "chroot_deny_fchdir",
51805 + .data = &grsec_enable_chroot_fchdir,
51806 + .maxlen = sizeof(int),
51807 + .mode = 0600,
51808 + .proc_handler = &proc_dointvec,
51809 + },
51810 +#endif
51811 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51812 + {
51813 + .ctl_name = CTL_UNNUMBERED,
51814 + .procname = "chroot_deny_chroot",
51815 + .data = &grsec_enable_chroot_double,
51816 + .maxlen = sizeof(int),
51817 + .mode = 0600,
51818 + .proc_handler = &proc_dointvec,
51819 + },
51820 +#endif
51821 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51822 + {
51823 + .ctl_name = CTL_UNNUMBERED,
51824 + .procname = "chroot_deny_pivot",
51825 + .data = &grsec_enable_chroot_pivot,
51826 + .maxlen = sizeof(int),
51827 + .mode = 0600,
51828 + .proc_handler = &proc_dointvec,
51829 + },
51830 +#endif
51831 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51832 + {
51833 + .ctl_name = CTL_UNNUMBERED,
51834 + .procname = "chroot_enforce_chdir",
51835 + .data = &grsec_enable_chroot_chdir,
51836 + .maxlen = sizeof(int),
51837 + .mode = 0600,
51838 + .proc_handler = &proc_dointvec,
51839 + },
51840 +#endif
51841 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51842 + {
51843 + .ctl_name = CTL_UNNUMBERED,
51844 + .procname = "chroot_deny_chmod",
51845 + .data = &grsec_enable_chroot_chmod,
51846 + .maxlen = sizeof(int),
51847 + .mode = 0600,
51848 + .proc_handler = &proc_dointvec,
51849 + },
51850 +#endif
51851 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51852 + {
51853 + .ctl_name = CTL_UNNUMBERED,
51854 + .procname = "chroot_deny_mknod",
51855 + .data = &grsec_enable_chroot_mknod,
51856 + .maxlen = sizeof(int),
51857 + .mode = 0600,
51858 + .proc_handler = &proc_dointvec,
51859 + },
51860 +#endif
51861 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51862 + {
51863 + .ctl_name = CTL_UNNUMBERED,
51864 + .procname = "chroot_restrict_nice",
51865 + .data = &grsec_enable_chroot_nice,
51866 + .maxlen = sizeof(int),
51867 + .mode = 0600,
51868 + .proc_handler = &proc_dointvec,
51869 + },
51870 +#endif
51871 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51872 + {
51873 + .ctl_name = CTL_UNNUMBERED,
51874 + .procname = "chroot_execlog",
51875 + .data = &grsec_enable_chroot_execlog,
51876 + .maxlen = sizeof(int),
51877 + .mode = 0600,
51878 + .proc_handler = &proc_dointvec,
51879 + },
51880 +#endif
51881 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51882 + {
51883 + .ctl_name = CTL_UNNUMBERED,
51884 + .procname = "chroot_caps",
51885 + .data = &grsec_enable_chroot_caps,
51886 + .maxlen = sizeof(int),
51887 + .mode = 0600,
51888 + .proc_handler = &proc_dointvec,
51889 + },
51890 +#endif
51891 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51892 + {
51893 + .ctl_name = CTL_UNNUMBERED,
51894 + .procname = "chroot_deny_sysctl",
51895 + .data = &grsec_enable_chroot_sysctl,
51896 + .maxlen = sizeof(int),
51897 + .mode = 0600,
51898 + .proc_handler = &proc_dointvec,
51899 + },
51900 +#endif
51901 +#ifdef CONFIG_GRKERNSEC_TPE
51902 + {
51903 + .ctl_name = CTL_UNNUMBERED,
51904 + .procname = "tpe",
51905 + .data = &grsec_enable_tpe,
51906 + .maxlen = sizeof(int),
51907 + .mode = 0600,
51908 + .proc_handler = &proc_dointvec,
51909 + },
51910 + {
51911 + .ctl_name = CTL_UNNUMBERED,
51912 + .procname = "tpe_gid",
51913 + .data = &grsec_tpe_gid,
51914 + .maxlen = sizeof(int),
51915 + .mode = 0600,
51916 + .proc_handler = &proc_dointvec,
51917 + },
51918 +#endif
51919 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51920 + {
51921 + .ctl_name = CTL_UNNUMBERED,
51922 + .procname = "tpe_invert",
51923 + .data = &grsec_enable_tpe_invert,
51924 + .maxlen = sizeof(int),
51925 + .mode = 0600,
51926 + .proc_handler = &proc_dointvec,
51927 + },
51928 +#endif
51929 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51930 + {
51931 + .ctl_name = CTL_UNNUMBERED,
51932 + .procname = "tpe_restrict_all",
51933 + .data = &grsec_enable_tpe_all,
51934 + .maxlen = sizeof(int),
51935 + .mode = 0600,
51936 + .proc_handler = &proc_dointvec,
51937 + },
51938 +#endif
51939 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51940 + {
51941 + .ctl_name = CTL_UNNUMBERED,
51942 + .procname = "socket_all",
51943 + .data = &grsec_enable_socket_all,
51944 + .maxlen = sizeof(int),
51945 + .mode = 0600,
51946 + .proc_handler = &proc_dointvec,
51947 + },
51948 + {
51949 + .ctl_name = CTL_UNNUMBERED,
51950 + .procname = "socket_all_gid",
51951 + .data = &grsec_socket_all_gid,
51952 + .maxlen = sizeof(int),
51953 + .mode = 0600,
51954 + .proc_handler = &proc_dointvec,
51955 + },
51956 +#endif
51957 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51958 + {
51959 + .ctl_name = CTL_UNNUMBERED,
51960 + .procname = "socket_client",
51961 + .data = &grsec_enable_socket_client,
51962 + .maxlen = sizeof(int),
51963 + .mode = 0600,
51964 + .proc_handler = &proc_dointvec,
51965 + },
51966 + {
51967 + .ctl_name = CTL_UNNUMBERED,
51968 + .procname = "socket_client_gid",
51969 + .data = &grsec_socket_client_gid,
51970 + .maxlen = sizeof(int),
51971 + .mode = 0600,
51972 + .proc_handler = &proc_dointvec,
51973 + },
51974 +#endif
51975 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51976 + {
51977 + .ctl_name = CTL_UNNUMBERED,
51978 + .procname = "socket_server",
51979 + .data = &grsec_enable_socket_server,
51980 + .maxlen = sizeof(int),
51981 + .mode = 0600,
51982 + .proc_handler = &proc_dointvec,
51983 + },
51984 + {
51985 + .ctl_name = CTL_UNNUMBERED,
51986 + .procname = "socket_server_gid",
51987 + .data = &grsec_socket_server_gid,
51988 + .maxlen = sizeof(int),
51989 + .mode = 0600,
51990 + .proc_handler = &proc_dointvec,
51991 + },
51992 +#endif
51993 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51994 + {
51995 + .ctl_name = CTL_UNNUMBERED,
51996 + .procname = "audit_group",
51997 + .data = &grsec_enable_group,
51998 + .maxlen = sizeof(int),
51999 + .mode = 0600,
52000 + .proc_handler = &proc_dointvec,
52001 + },
52002 + {
52003 + .ctl_name = CTL_UNNUMBERED,
52004 + .procname = "audit_gid",
52005 + .data = &grsec_audit_gid,
52006 + .maxlen = sizeof(int),
52007 + .mode = 0600,
52008 + .proc_handler = &proc_dointvec,
52009 + },
52010 +#endif
52011 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52012 + {
52013 + .ctl_name = CTL_UNNUMBERED,
52014 + .procname = "audit_chdir",
52015 + .data = &grsec_enable_chdir,
52016 + .maxlen = sizeof(int),
52017 + .mode = 0600,
52018 + .proc_handler = &proc_dointvec,
52019 + },
52020 +#endif
52021 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52022 + {
52023 + .ctl_name = CTL_UNNUMBERED,
52024 + .procname = "audit_mount",
52025 + .data = &grsec_enable_mount,
52026 + .maxlen = sizeof(int),
52027 + .mode = 0600,
52028 + .proc_handler = &proc_dointvec,
52029 + },
52030 +#endif
52031 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52032 + {
52033 + .ctl_name = CTL_UNNUMBERED,
52034 + .procname = "audit_textrel",
52035 + .data = &grsec_enable_audit_textrel,
52036 + .maxlen = sizeof(int),
52037 + .mode = 0600,
52038 + .proc_handler = &proc_dointvec,
52039 + },
52040 +#endif
52041 +#ifdef CONFIG_GRKERNSEC_DMESG
52042 + {
52043 + .ctl_name = CTL_UNNUMBERED,
52044 + .procname = "dmesg",
52045 + .data = &grsec_enable_dmesg,
52046 + .maxlen = sizeof(int),
52047 + .mode = 0600,
52048 + .proc_handler = &proc_dointvec,
52049 + },
52050 +#endif
52051 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52052 + {
52053 + .ctl_name = CTL_UNNUMBERED,
52054 + .procname = "chroot_findtask",
52055 + .data = &grsec_enable_chroot_findtask,
52056 + .maxlen = sizeof(int),
52057 + .mode = 0600,
52058 + .proc_handler = &proc_dointvec,
52059 + },
52060 +#endif
52061 +#ifdef CONFIG_GRKERNSEC_RESLOG
52062 + {
52063 + .ctl_name = CTL_UNNUMBERED,
52064 + .procname = "resource_logging",
52065 + .data = &grsec_resource_logging,
52066 + .maxlen = sizeof(int),
52067 + .mode = 0600,
52068 + .proc_handler = &proc_dointvec,
52069 + },
52070 +#endif
52071 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52072 + {
52073 + .ctl_name = CTL_UNNUMBERED,
52074 + .procname = "audit_ptrace",
52075 + .data = &grsec_enable_audit_ptrace,
52076 + .maxlen = sizeof(int),
52077 + .mode = 0600,
52078 + .proc_handler = &proc_dointvec,
52079 + },
52080 +#endif
52081 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52082 + {
52083 + .ctl_name = CTL_UNNUMBERED,
52084 + .procname = "harden_ptrace",
52085 + .data = &grsec_enable_harden_ptrace,
52086 + .maxlen = sizeof(int),
52087 + .mode = 0600,
52088 + .proc_handler = &proc_dointvec,
52089 + },
52090 +#endif
52091 + {
52092 + .ctl_name = CTL_UNNUMBERED,
52093 + .procname = "grsec_lock",
52094 + .data = &grsec_lock,
52095 + .maxlen = sizeof(int),
52096 + .mode = 0600,
52097 + .proc_handler = &proc_dointvec,
52098 + },
52099 +#endif
52100 +#ifdef CONFIG_GRKERNSEC_ROFS
52101 + {
52102 + .ctl_name = CTL_UNNUMBERED,
52103 + .procname = "romount_protect",
52104 + .data = &grsec_enable_rofs,
52105 + .maxlen = sizeof(int),
52106 + .mode = 0600,
52107 + .proc_handler = &proc_dointvec_minmax,
52108 + .extra1 = &one,
52109 + .extra2 = &one,
52110 + },
52111 +#endif
52112 + { .ctl_name = 0 }
52113 +};
52114 +#endif
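/*
 * A minimal userland sketch (not taken from the patch): the ctl_table above
 * is expected to register its entries under /proc/sys/kernel/grsecurity/;
 * that exact path is an assumption here, as is CONFIG_GRKERNSEC_SYSCTL being
 * enabled. Writing 1 to grsec_lock makes gr_handle_sysctl_mod() above refuse
 * any further writes to entries in the "grsecurity" sysctl directory.
 */
#include <stdio.h>

int main(void)
{
	/* assumed path of the grsec_lock entry declared above */
	FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}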
52115 diff -urNp linux-2.6.32.41/grsecurity/grsec_time.c linux-2.6.32.41/grsecurity/grsec_time.c
52116 --- linux-2.6.32.41/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52117 +++ linux-2.6.32.41/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52118 @@ -0,0 +1,16 @@
52119 +#include <linux/kernel.h>
52120 +#include <linux/sched.h>
52121 +#include <linux/grinternal.h>
52122 +#include <linux/module.h>
52123 +
52124 +void
52125 +gr_log_timechange(void)
52126 +{
52127 +#ifdef CONFIG_GRKERNSEC_TIME
52128 + if (grsec_enable_time)
52129 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52130 +#endif
52131 + return;
52132 +}
52133 +
52134 +EXPORT_SYMBOL(gr_log_timechange);
52135 diff -urNp linux-2.6.32.41/grsecurity/grsec_tpe.c linux-2.6.32.41/grsecurity/grsec_tpe.c
52136 --- linux-2.6.32.41/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52137 +++ linux-2.6.32.41/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52138 @@ -0,0 +1,39 @@
52139 +#include <linux/kernel.h>
52140 +#include <linux/sched.h>
52141 +#include <linux/file.h>
52142 +#include <linux/fs.h>
52143 +#include <linux/grinternal.h>
52144 +
52145 +extern int gr_acl_tpe_check(void);
52146 +
52147 +int
52148 +gr_tpe_allow(const struct file *file)
52149 +{
52150 +#ifdef CONFIG_GRKERNSEC
52151 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52152 + const struct cred *cred = current_cred();
52153 +
52154 + if (cred->uid && ((grsec_enable_tpe &&
52155 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52156 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52157 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52158 +#else
52159 + in_group_p(grsec_tpe_gid)
52160 +#endif
52161 + ) || gr_acl_tpe_check()) &&
52162 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52163 + (inode->i_mode & S_IWOTH))))) {
52164 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52165 + return 0;
52166 + }
52167 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52168 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52169 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52170 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52171 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52172 + return 0;
52173 + }
52174 +#endif
52175 +#endif
52176 + return 1;
52177 +}
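/*
 * A minimal userland sketch of the parent-directory test that gr_tpe_allow()
 * applies above, limited to the simple case (TPE enabled, GID not inverted,
 * no RBAC subject): execution is denied for a non-root user in the TPE group
 * when the containing directory is not owned by root, or is owned by root but
 * group- or world-writable. The helper name and parameters are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool tpe_would_deny(uid_t uid, bool in_tpe_group,
			   uid_t dir_uid, mode_t dir_mode)
{
	/* untrusted directory: non-root owner, or writable by group/other */
	bool untrusted_dir = dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH));

	return uid != 0 && in_tpe_group && untrusted_dir;
}

int main(void)
{
	/* /tmp is typically mode 1777, so execution from it would be denied */
	printf("%d\n", tpe_would_deny(1000, true, 0, 01777));
	return 0;
}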
52178 diff -urNp linux-2.6.32.41/grsecurity/grsum.c linux-2.6.32.41/grsecurity/grsum.c
52179 --- linux-2.6.32.41/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52180 +++ linux-2.6.32.41/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52181 @@ -0,0 +1,61 @@
52182 +#include <linux/err.h>
52183 +#include <linux/kernel.h>
52184 +#include <linux/sched.h>
52185 +#include <linux/mm.h>
52186 +#include <linux/scatterlist.h>
52187 +#include <linux/crypto.h>
52188 +#include <linux/gracl.h>
52189 +
52190 +
52191 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52192 +#error "crypto and sha256 must be built into the kernel"
52193 +#endif
52194 +
52195 +int
52196 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52197 +{
52198 + char *p;
52199 + struct crypto_hash *tfm;
52200 + struct hash_desc desc;
52201 + struct scatterlist sg;
52202 + unsigned char temp_sum[GR_SHA_LEN];
52203 + volatile int retval = 0;
52204 + volatile int dummy = 0;
52205 + unsigned int i;
52206 +
52207 + sg_init_table(&sg, 1);
52208 +
52209 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52210 + if (IS_ERR(tfm)) {
52211 + /* should never happen, since sha256 should be built in */
52212 + return 1;
52213 + }
52214 +
52215 + desc.tfm = tfm;
52216 + desc.flags = 0;
52217 +
52218 + crypto_hash_init(&desc);
52219 +
52220 + p = salt;
52221 + sg_set_buf(&sg, p, GR_SALT_LEN);
52222 + crypto_hash_update(&desc, &sg, sg.length);
52223 +
52224 + p = entry->pw;
52225 + sg_set_buf(&sg, p, strlen(p));
52226 +
52227 + crypto_hash_update(&desc, &sg, sg.length);
52228 +
52229 + crypto_hash_final(&desc, temp_sum);
52230 +
52231 + memset(entry->pw, 0, GR_PW_LEN);
52232 +
52233 + for (i = 0; i < GR_SHA_LEN; i++)
52234 + if (sum[i] != temp_sum[i])
52235 + retval = 1;
52236 + else
52237 +			dummy = 1; // keep both branches doing equal work so timing does not reveal where the first mismatch occurs
52238 +
52239 + crypto_free_hash(tfm);
52240 +
52241 + return retval;
52242 +}
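/*
 * The comparison loop in chkpw() above touches every byte and assigns on both
 * branches so that the time taken does not depend on where the first mismatch
 * occurs. A common userland equivalent, shown here as a sketch with a
 * hypothetical name, accumulates the XOR of all bytes instead of branching.
 */
#include <stddef.h>
#include <stdio.h>

static int cmp_fixed_work(const unsigned char *a, const unsigned char *b,
			  size_t n)
{
	unsigned char diff = 0;
	size_t i;

	/* no early exit: every byte is examined regardless of mismatches */
	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;	/* 1 on mismatch, matching chkpw()'s convention */
}

int main(void)
{
	printf("%d\n", cmp_fixed_work((const unsigned char *)"abc",
				      (const unsigned char *)"abd", 3));
	return 0;
}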
52243 diff -urNp linux-2.6.32.41/grsecurity/Kconfig linux-2.6.32.41/grsecurity/Kconfig
52244 --- linux-2.6.32.41/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52245 +++ linux-2.6.32.41/grsecurity/Kconfig 2011-04-17 15:56:46.000000000 -0400
52246 @@ -0,0 +1,1045 @@
52247 +#
52248 +# grsecurity configuration
52249 +#
52250 +
52251 +menu "Grsecurity"
52252 +
52253 +config GRKERNSEC
52254 + bool "Grsecurity"
52255 + select CRYPTO
52256 + select CRYPTO_SHA256
52257 + help
52258 + If you say Y here, you will be able to configure many features
52259 + that will enhance the security of your system. It is highly
52260 + recommended that you say Y here and read through the help
52261 + for each option so that you fully understand the features and
52262 + can evaluate their usefulness for your machine.
52263 +
52264 +choice
52265 + prompt "Security Level"
52266 + depends on GRKERNSEC
52267 + default GRKERNSEC_CUSTOM
52268 +
52269 +config GRKERNSEC_LOW
52270 + bool "Low"
52271 + select GRKERNSEC_LINK
52272 + select GRKERNSEC_FIFO
52273 + select GRKERNSEC_EXECVE
52274 + select GRKERNSEC_RANDNET
52275 + select GRKERNSEC_DMESG
52276 + select GRKERNSEC_CHROOT
52277 + select GRKERNSEC_CHROOT_CHDIR
52278 +
52279 + help
52280 + If you choose this option, several of the grsecurity options will
52281 + be enabled that will give you greater protection against a number
52282 + of attacks, while ensuring that none of your software will have any
52283 + conflicts with the additional security measures. If you run a lot
52284 + of unusual software, or you are having problems with the higher
52285 + security levels, you should say Y here. With this option, the
52286 + following features are enabled:
52287 +
52288 + - Linking restrictions
52289 + - FIFO restrictions
52290 + - Enforcing RLIMIT_NPROC on execve
52291 + - Restricted dmesg
52292 + - Enforced chdir("/") on chroot
52293 + - Runtime module disabling
52294 +
52295 +config GRKERNSEC_MEDIUM
52296 + bool "Medium"
52297 + select PAX
52298 + select PAX_EI_PAX
52299 + select PAX_PT_PAX_FLAGS
52300 + select PAX_HAVE_ACL_FLAGS
52301 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52302 + select GRKERNSEC_CHROOT
52303 + select GRKERNSEC_CHROOT_SYSCTL
52304 + select GRKERNSEC_LINK
52305 + select GRKERNSEC_FIFO
52306 + select GRKERNSEC_EXECVE
52307 + select GRKERNSEC_DMESG
52308 + select GRKERNSEC_RANDNET
52309 + select GRKERNSEC_FORKFAIL
52310 + select GRKERNSEC_TIME
52311 + select GRKERNSEC_SIGNAL
52312 + select GRKERNSEC_CHROOT
52313 + select GRKERNSEC_CHROOT_UNIX
52314 + select GRKERNSEC_CHROOT_MOUNT
52315 + select GRKERNSEC_CHROOT_PIVOT
52316 + select GRKERNSEC_CHROOT_DOUBLE
52317 + select GRKERNSEC_CHROOT_CHDIR
52318 + select GRKERNSEC_CHROOT_MKNOD
52319 + select GRKERNSEC_PROC
52320 + select GRKERNSEC_PROC_USERGROUP
52321 + select PAX_RANDUSTACK
52322 + select PAX_ASLR
52323 + select PAX_RANDMMAP
52324 + select PAX_REFCOUNT if (X86 || SPARC64)
52325 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52326 +
52327 + help
52328 + If you say Y here, several features in addition to those included
52329 + in the low additional security level will be enabled. These
52330 + features provide even more security to your system, though in rare
52331 + cases they may be incompatible with very old or poorly written
52332 + software. If you enable this option, make sure that your auth
52333 + service (identd) is running as gid 1001. With this option,
52334 + the following features (in addition to those provided in the
52335 + low additional security level) will be enabled:
52336 +
52337 + - Failed fork logging
52338 + - Time change logging
52339 + - Signal logging
52340 + - Deny mounts in chroot
52341 + - Deny double chrooting
52342 + - Deny sysctl writes in chroot
52343 + - Deny mknod in chroot
52344 + - Deny access to abstract AF_UNIX sockets out of chroot
52345 + - Deny pivot_root in chroot
52346 + - Deny writing to /dev/kmem, /dev/mem, and /dev/port
52347 + - /proc restrictions with special GID set to 10 (usually wheel)
52348 + - Address Space Layout Randomization (ASLR)
52349 + - Prevent exploitation of most refcount overflows
52350 + - Bounds checking of copying between the kernel and userland
52351 +
52352 +config GRKERNSEC_HIGH
52353 + bool "High"
52354 + select GRKERNSEC_LINK
52355 + select GRKERNSEC_FIFO
52356 + select GRKERNSEC_EXECVE
52357 + select GRKERNSEC_DMESG
52358 + select GRKERNSEC_FORKFAIL
52359 + select GRKERNSEC_TIME
52360 + select GRKERNSEC_SIGNAL
52361 + select GRKERNSEC_CHROOT
52362 + select GRKERNSEC_CHROOT_SHMAT
52363 + select GRKERNSEC_CHROOT_UNIX
52364 + select GRKERNSEC_CHROOT_MOUNT
52365 + select GRKERNSEC_CHROOT_FCHDIR
52366 + select GRKERNSEC_CHROOT_PIVOT
52367 + select GRKERNSEC_CHROOT_DOUBLE
52368 + select GRKERNSEC_CHROOT_CHDIR
52369 + select GRKERNSEC_CHROOT_MKNOD
52370 + select GRKERNSEC_CHROOT_CAPS
52371 + select GRKERNSEC_CHROOT_SYSCTL
52372 + select GRKERNSEC_CHROOT_FINDTASK
52373 + select GRKERNSEC_SYSFS_RESTRICT
52374 + select GRKERNSEC_PROC
52375 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52376 + select GRKERNSEC_HIDESYM
52377 + select GRKERNSEC_BRUTE
52378 + select GRKERNSEC_PROC_USERGROUP
52379 + select GRKERNSEC_KMEM
52380 + select GRKERNSEC_RESLOG
52381 + select GRKERNSEC_RANDNET
52382 + select GRKERNSEC_PROC_ADD
52383 + select GRKERNSEC_CHROOT_CHMOD
52384 + select GRKERNSEC_CHROOT_NICE
52385 + select GRKERNSEC_AUDIT_MOUNT
52386 + select GRKERNSEC_MODHARDEN if (MODULES)
52387 + select GRKERNSEC_HARDEN_PTRACE
52388 + select GRKERNSEC_VM86 if (X86_32)
52389 + select GRKERNSEC_KERN_LOCKOUT if (X86)
52390 + select PAX
52391 + select PAX_RANDUSTACK
52392 + select PAX_ASLR
52393 + select PAX_RANDMMAP
52394 + select PAX_NOEXEC
52395 + select PAX_MPROTECT
52396 + select PAX_EI_PAX
52397 + select PAX_PT_PAX_FLAGS
52398 + select PAX_HAVE_ACL_FLAGS
52399 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52400 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52401 + select PAX_RANDKSTACK if (X86_TSC && X86)
52402 + select PAX_SEGMEXEC if (X86_32)
52403 + select PAX_PAGEEXEC
52404 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52405 + select PAX_EMUTRAMP if (PARISC)
52406 + select PAX_EMUSIGRT if (PARISC)
52407 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52408 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52409 + select PAX_REFCOUNT if (X86 || SPARC64)
52410 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52411 + help
52412 + If you say Y here, many of the features of grsecurity will be
52413 + enabled, which will protect you against many kinds of attacks
52414 + against your system. The heightened security comes at a cost
52415 + of an increased chance of incompatibilities with rare software
52416 + on your machine. Since this security level enables PaX, you should
52417 + view <http://pax.grsecurity.net> and read about the PaX
52418 + project. While you are there, download chpax and run it on
52419 + binaries that cause problems with PaX. Also remember that
52420 + since the /proc restrictions are enabled, you must run your
52421 + identd as gid 1001. This security level enables the following
52422 + features in addition to those listed in the low and medium
52423 + security levels:
52424 +
52425 + - Additional /proc restrictions
52426 + - Chmod restrictions in chroot
52427 + - No signals, ptrace, or viewing of processes outside of chroot
52428 + - Capability restrictions in chroot
52429 + - Deny fchdir out of chroot
52430 + - Priority restrictions in chroot
52431 + - Segmentation-based implementation of PaX
52432 + - Mprotect restrictions
52433 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52434 + - Kernel stack randomization
52435 + - Mount/unmount/remount logging
52436 + - Kernel symbol hiding
52437 + - Prevention of memory exhaustion-based exploits
52438 + - Hardening of module auto-loading
52439 + - Ptrace restrictions
52440 + - Restricted vm86 mode
52441 + - Restricted sysfs/debugfs
52442 + - Active kernel exploit response
52443 +
52444 +config GRKERNSEC_CUSTOM
52445 + bool "Custom"
52446 + help
52447 + If you say Y here, you will be able to configure every grsecurity
52448 + option, which allows you to enable many more features that aren't
52449 + covered in the basic security levels. These additional features
52450 + include TPE, socket restrictions, and the sysctl system for
52451 + grsecurity. It is advised that you read through the help for
52452 + each option to determine its usefulness in your situation.
52453 +
52454 +endchoice
52455 +
52456 +menu "Address Space Protection"
52457 +depends on GRKERNSEC
52458 +
52459 +config GRKERNSEC_KMEM
52460 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52461 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52462 + help
52463 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52464 + be written to via mmap or otherwise to modify the running kernel.
52465 + /dev/port will also not be allowed to be opened. If you have module
52466 + support disabled, enabling this will close up four ways that are
52467 + currently used to insert malicious code into the running kernel.
52468 + Even with all these features enabled, we still highly recommend that
52469 + you use the RBAC system, as it is still possible for an attacker to
52470 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52471 + If you are not using XFree86, you may be able to stop this additional
52472 + case by enabling the 'Disable privileged I/O' option. Though nothing
52473 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52474 + but only to video memory, which is the only writing we allow in this
52475 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
52476 + cannot later be made PROT_WRITE via mprotect.
52477 + It is highly recommended that you say Y here if you meet all the
52478 + conditions above.
52479 +
52480 +config GRKERNSEC_VM86
52481 + bool "Restrict VM86 mode"
52482 + depends on X86_32
52483 +
52484 + help
52485 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52486 + make use of a special execution mode on 32bit x86 processors called
52487 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52488 + video cards and will still work with this option enabled. The purpose
52489 + of the option is to prevent exploitation of emulation errors in
52490 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52491 + Nearly all users should be able to enable this option.
52492 +
52493 +config GRKERNSEC_IO
52494 + bool "Disable privileged I/O"
52495 + depends on X86
52496 + select RTC_CLASS
52497 + select RTC_INTF_DEV
52498 + select RTC_DRV_CMOS
52499 +
52500 + help
52501 + If you say Y here, all ioperm and iopl calls will return an error.
52502 + Ioperm and iopl can be used to modify the running kernel.
52503 + Unfortunately, some programs need this access to operate properly,
52504 + the most notable of which are XFree86 and hwclock. The hwclock case can
52505 + be remedied by having RTC support in the kernel, so real-time
52506 + clock support is enabled if this option is enabled, to ensure
52507 + that hwclock operates correctly. XFree86 still will not
52508 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52509 + IF YOU USE XFree86. If you use XFree86 and you still want to
52510 + protect your kernel against modification, use the RBAC system.
52511 +
52512 +config GRKERNSEC_PROC_MEMMAP
52513 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52514 + default y if (PAX_NOEXEC || PAX_ASLR)
52515 + depends on PAX_NOEXEC || PAX_ASLR
52516 + help
52517 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52518 + give no information about the addresses of a task's mappings if
52519 + PaX features that rely on random addresses are enabled for that task.
52520 + If you use PaX it is greatly recommended that you say Y here as it
52521 + closes up a hole that makes the full ASLR useless for suid
52522 + binaries.
52523 +
52524 +config GRKERNSEC_BRUTE
52525 + bool "Deter exploit bruteforcing"
52526 + help
52527 + If you say Y here, attempts to bruteforce exploits against forking
52528 + daemons such as apache or sshd, as well as against suid/sgid binaries
52529 + will be deterred. When a child of a forking daemon is killed by PaX
52530 + or crashes due to an illegal instruction or other suspicious signal,
52531 + the parent process will be delayed 30 seconds upon every subsequent
52532 + fork until the administrator is able to assess the situation and
52533 + restart the daemon.
52534 + In the suid/sgid case, the attempt is logged, the user has all their
52535 + processes terminated, and they are prevented from executing any further
52536 + processes for 15 minutes.
52537 + It is recommended that you also enable signal logging in the auditing
52538 + section so that logs are generated when a process triggers a suspicious
52539 + signal.
52540 +
52541 +config GRKERNSEC_MODHARDEN
52542 + bool "Harden module auto-loading"
52543 + depends on MODULES
52544 + help
52545 + If you say Y here, module auto-loading in response to use of some
52546 + feature implemented by an unloaded module will be restricted to
52547 + root users. Enabling this option helps defend against attacks
52548 + by unprivileged users who abuse the auto-loading behavior to
52549 + cause a vulnerable module to load that is then exploited.
52550 +
52551 + If this option prevents a legitimate use of auto-loading for a
52552 + non-root user, the administrator can execute modprobe manually
52553 + with the exact name of the module mentioned in the alert log.
52554 + Alternatively, the administrator can add the module to the list
52555 + of modules loaded at boot by modifying init scripts.
52556 +
52557 + Modification of init scripts will most likely be needed on
52558 + Ubuntu servers with encrypted home directory support enabled,
52559 + as the first non-root user logging in will cause the ecb(aes),
52560 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52561 +
52562 +config GRKERNSEC_HIDESYM
52563 + bool "Hide kernel symbols"
52564 + help
52565 + If you say Y here, getting information on loaded modules, and
52566 + displaying all kernel symbols through a syscall will be restricted
52567 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52568 + /proc/kallsyms will be restricted to the root user. The RBAC
52569 + system can hide that entry even from root.
52570 +
52571 + This option also prevents leaking of kernel addresses through
52572 + several /proc entries.
52573 +
52574 + Note that this option is only effective provided the following
52575 + conditions are met:
52576 + 1) The kernel using grsecurity is not precompiled by some distribution
52577 + 2) You have also enabled GRKERNSEC_DMESG
52578 + 3) You are using the RBAC system and hiding other files such as your
52579 + kernel image and System.map. Alternatively, enabling this option
52580 + causes the permissions on /boot, /lib/modules, and the kernel
52581 + source directory to change at compile time to prevent
52582 + reading by non-root users.
52583 + If the above conditions are met, this option will aid in providing a
52584 + useful protection against local kernel exploitation of overflows
52585 + and arbitrary read/write vulnerabilities.
52586 +
52587 +config GRKERNSEC_KERN_LOCKOUT
52588 + bool "Active kernel exploit response"
52589 + depends on X86
52590 + help
52591 + If you say Y here, when a PaX alert is triggered due to suspicious
52592 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52593 + or an oops occurs due to bad memory accesses, instead of just
52594 + terminating the offending process (and potentially allowing
52595 + a subsequent exploit from the same user), we will take one of two
52596 + actions:
52597 + If the user was root, we will panic the system
52598 + If the user was non-root, we will log the attempt, terminate
52599 + all processes owned by the user, then prevent them from creating
52600 + any new processes until the system is restarted
52601 + This deters repeated kernel exploitation/bruteforcing attempts
52602 + and is useful for later forensics.
52603 +
52604 +endmenu
52605 +menu "Role Based Access Control Options"
52606 +depends on GRKERNSEC
52607 +
52608 +config GRKERNSEC_RBAC_DEBUG
52609 + bool
52610 +
52611 +config GRKERNSEC_NO_RBAC
52612 + bool "Disable RBAC system"
52613 + help
52614 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52615 + preventing the RBAC system from being enabled. You should only say Y
52616 + here if you have no intention of using the RBAC system, so as to prevent
52617 + an attacker with root access from misusing the RBAC system to hide files
52618 + and processes when loadable module support and /dev/[k]mem have been
52619 + locked down.
52620 +
52621 +config GRKERNSEC_ACL_HIDEKERN
52622 + bool "Hide kernel processes"
52623 + help
52624 + If you say Y here, all kernel threads will be hidden from all
52625 + processes but those whose subject has the "view hidden processes"
52626 + flag.
52627 +
52628 +config GRKERNSEC_ACL_MAXTRIES
52629 + int "Maximum tries before password lockout"
52630 + default 3
52631 + help
52632 + This option enforces the maximum number of times a user can attempt
52633 + to authorize themselves with the grsecurity RBAC system before being
52634 + denied the ability to attempt authorization again for a specified time.
52635 + The lower the number, the harder it will be to brute-force a password.
52636 +
52637 +config GRKERNSEC_ACL_TIMEOUT
52638 + int "Time to wait after max password tries, in seconds"
52639 + default 30
52640 + help
52641 + This option specifies the time the user must wait after attempting to
52642 + authorize to the RBAC system with the maximum number of invalid
52643 + passwords. The higher the number, the harder it will be to brute-force
52644 + a password.
52645 +
52646 +endmenu
52647 +menu "Filesystem Protections"
52648 +depends on GRKERNSEC
52649 +
52650 +config GRKERNSEC_PROC
52651 + bool "Proc restrictions"
52652 + help
52653 + If you say Y here, the permissions of the /proc filesystem
52654 + will be altered to enhance system security and privacy. You MUST
52655 + choose either a user only restriction or a user and group restriction.
52656 + Depending upon the option you choose, you can either restrict users to
52657 + see only the processes they themselves run, or choose a group that can
52658 + view all processes and files normally restricted to root if you choose
52659 + the "restrict to user only" option. NOTE: If you're running identd as
52660 + a non-root user, you will have to run it as the group you specify here.
52661 +
52662 +config GRKERNSEC_PROC_USER
52663 + bool "Restrict /proc to user only"
52664 + depends on GRKERNSEC_PROC
52665 + help
52666 + If you say Y here, non-root users will only be able to view their own
52667 + processes, and will be prevented from viewing network-related information
52668 + and kernel symbol and module information.
52669 +
52670 +config GRKERNSEC_PROC_USERGROUP
52671 + bool "Allow special group"
52672 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52673 + help
52674 + If you say Y here, you will be able to select a group that will be
52675 + able to view all processes and network-related information. If you've
52676 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52677 + remain hidden. This option is useful if you want to run identd as
52678 + a non-root user.
52679 +
52680 +config GRKERNSEC_PROC_GID
52681 + int "GID for special group"
52682 + depends on GRKERNSEC_PROC_USERGROUP
52683 + default 1001
52684 +
52685 +config GRKERNSEC_PROC_ADD
52686 + bool "Additional restrictions"
52687 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52688 + help
52689 + If you say Y here, additional restrictions will be placed on
52690 + /proc that keep normal users from viewing device information and
52691 + slabinfo information that could be useful for exploits.
52692 +
52693 +config GRKERNSEC_LINK
52694 + bool "Linking restrictions"
52695 + help
52696 + If you say Y here, /tmp race exploits will be prevented, since users
52697 + will no longer be able to follow symlinks owned by other users in
52698 + world-writable +t directories (e.g. /tmp), unless the owner of the
52699 + symlink is the owner of the directory. Users will also not be
52700 + able to hardlink to files they do not own. If the sysctl option is
52701 + enabled, a sysctl option with name "linking_restrictions" is created.
52702 +
52703 +config GRKERNSEC_FIFO
52704 + bool "FIFO restrictions"
52705 + help
52706 + If you say Y here, users will not be able to write to FIFOs they don't
52707 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52708 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
52709 + option is enabled, a sysctl option with name "fifo_restrictions" is
52710 + created.
52711 +
52712 +config GRKERNSEC_SYSFS_RESTRICT
52713 + bool "Sysfs/debugfs restriction"
52714 + depends on SYSFS
52715 + help
52716 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52717 + any filesystem normally mounted under it (e.g. debugfs) will only
52718 + be accessible by root. These filesystems generally provide access
52719 + to hardware and debug information that isn't appropriate for unprivileged
52720 + users of the system. Sysfs and debugfs have also become a large source
52721 + of new vulnerabilities, ranging from infoleaks to local compromise.
52722 + There has been very little oversight with an eye toward security involved
52723 + in adding new exporters of information to these filesystems, so their
52724 + use is discouraged.
52725 + This option is equivalent to a chmod 0700 of the mount paths.
52726 +
52727 +config GRKERNSEC_ROFS
52728 + bool "Runtime read-only mount protection"
52729 + help
52730 + If you say Y here, a sysctl option with name "romount_protect" will
52731 + be created. By setting this option to 1 at runtime, filesystems
52732 + will be protected in the following ways:
52733 + * No new writable mounts will be allowed
52734 + * Existing read-only mounts won't be able to be remounted read/write
52735 + * Write operations will be denied on all block devices
52736 + This option acts independently of grsec_lock: once it is set to 1,
52737 + it cannot be turned off. Therefore, please be mindful of the resulting
52738 + behavior if this option is enabled in an init script on a read-only
52739 + filesystem. This feature is mainly intended for secure embedded systems.
52740 +
52741 +config GRKERNSEC_CHROOT
52742 + bool "Chroot jail restrictions"
52743 + help
52744 + If you say Y here, you will be able to choose several options that will
52745 + make breaking out of a chrooted jail much more difficult. If you
52746 + encounter no software incompatibilities with the following options, it
52747 + is recommended that you enable each one.
52748 +
52749 +config GRKERNSEC_CHROOT_MOUNT
52750 + bool "Deny mounts"
52751 + depends on GRKERNSEC_CHROOT
52752 + help
52753 + If you say Y here, processes inside a chroot will not be able to
52754 + mount or remount filesystems. If the sysctl option is enabled, a
52755 + sysctl option with name "chroot_deny_mount" is created.
52756 +
52757 +config GRKERNSEC_CHROOT_DOUBLE
52758 + bool "Deny double-chroots"
52759 + depends on GRKERNSEC_CHROOT
52760 + help
52761 + If you say Y here, processes inside a chroot will not be able to chroot
52762 + again outside the chroot. This is a widely used method of breaking
52763 + out of a chroot jail and should not be allowed. If the sysctl
52764 + option is enabled, a sysctl option with name
52765 + "chroot_deny_chroot" is created.
52766 +
52767 +config GRKERNSEC_CHROOT_PIVOT
52768 + bool "Deny pivot_root in chroot"
52769 + depends on GRKERNSEC_CHROOT
52770 + help
52771 + If you say Y here, processes inside a chroot will not be able to use
52772 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52773 + works similarly to chroot in that it changes the root filesystem. This
52774 + function could be misused in a chrooted process to attempt to break out
52775 + of the chroot, and therefore should not be allowed. If the sysctl
52776 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52777 + created.
52778 +
52779 +config GRKERNSEC_CHROOT_CHDIR
52780 + bool "Enforce chdir(\"/\") on all chroots"
52781 + depends on GRKERNSEC_CHROOT
52782 + help
52783 + If you say Y here, the current working directory of all newly-chrooted
52784 + applications will be set to the root directory of the chroot.
52785 + The man page on chroot(2) states:
52786 + Note that this call does not change the current working
52787 + directory, so that `.' can be outside the tree rooted at
52788 + `/'. In particular, the super-user can escape from a
52789 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52790 +
52791 + It is recommended that you say Y here, since it's not known to break
52792 + any software. If the sysctl option is enabled, a sysctl option with
52793 + name "chroot_enforce_chdir" is created.
52794 +
52795 +config GRKERNSEC_CHROOT_CHMOD
52796 + bool "Deny (f)chmod +s"
52797 + depends on GRKERNSEC_CHROOT
52798 + help
52799 + If you say Y here, processes inside a chroot will not be able to chmod
52800 + or fchmod files to make them have suid or sgid bits. This protects
52801 + against another published method of breaking a chroot. If the sysctl
52802 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
52803 + created.
52804 +
52805 +config GRKERNSEC_CHROOT_FCHDIR
52806 + bool "Deny fchdir out of chroot"
52807 + depends on GRKERNSEC_CHROOT
52808 + help
52809 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
52810 + to a file descriptor of the chrooting process that points to a directory
52811 + outside the filesystem will be stopped. If the sysctl option
52812 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52813 +
52814 +config GRKERNSEC_CHROOT_MKNOD
52815 + bool "Deny mknod"
52816 + depends on GRKERNSEC_CHROOT
52817 + help
52818 + If you say Y here, processes inside a chroot will not be allowed to
52819 + mknod. The problem with using mknod inside a chroot is that it
52820 + would allow an attacker to create a device entry that is the same
52821 + as one on the physical root of your system, which could be
52822 + anything from the console device to a device for your hard drive (which
52823 + they could then use to wipe the drive or steal data). It is recommended
52824 + that you say Y here, unless you run into software incompatibilities.
52825 + If the sysctl option is enabled, a sysctl option with name
52826 + "chroot_deny_mknod" is created.
52827 +
52828 +config GRKERNSEC_CHROOT_SHMAT
52829 + bool "Deny shmat() out of chroot"
52830 + depends on GRKERNSEC_CHROOT
52831 + help
52832 + If you say Y here, processes inside a chroot will not be able to attach
52833 + to shared memory segments that were created outside of the chroot jail.
52834 + It is recommended that you say Y here. If the sysctl option is enabled,
52835 + a sysctl option with name "chroot_deny_shmat" is created.
52836 +
52837 +config GRKERNSEC_CHROOT_UNIX
52838 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
52839 + depends on GRKERNSEC_CHROOT
52840 + help
52841 + If you say Y here, processes inside a chroot will not be able to
52842 + connect to abstract (meaning not belonging to a filesystem) Unix
52843 + domain sockets that were bound outside of a chroot. It is recommended
52844 + that you say Y here. If the sysctl option is enabled, a sysctl option
52845 + with name "chroot_deny_unix" is created.
52846 +
52847 +config GRKERNSEC_CHROOT_FINDTASK
52848 + bool "Protect outside processes"
52849 + depends on GRKERNSEC_CHROOT
52850 + help
52851 + If you say Y here, processes inside a chroot will not be able to
52852 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52853 + getsid, or view any process outside of the chroot. If the sysctl
52854 + option is enabled, a sysctl option with name "chroot_findtask" is
52855 + created.
52856 +
52857 +config GRKERNSEC_CHROOT_NICE
52858 + bool "Restrict priority changes"
52859 + depends on GRKERNSEC_CHROOT
52860 + help
52861 + If you say Y here, processes inside a chroot will not be able to raise
52862 + the priority of processes in the chroot, or alter the priority of
52863 + processes outside the chroot. This provides more security than simply
52864 + removing CAP_SYS_NICE from the process' capability set. If the
52865 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52866 + is created.
52867 +
52868 +config GRKERNSEC_CHROOT_SYSCTL
52869 + bool "Deny sysctl writes"
52870 + depends on GRKERNSEC_CHROOT
52871 + help
52872 + If you say Y here, an attacker in a chroot will not be able to
52873 + write to sysctl entries, either by sysctl(2) or through a /proc
52874 + interface. It is strongly recommended that you say Y here. If the
52875 + sysctl option is enabled, a sysctl option with name
52876 + "chroot_deny_sysctl" is created.
52877 +
52878 +config GRKERNSEC_CHROOT_CAPS
52879 + bool "Capability restrictions"
52880 + depends on GRKERNSEC_CHROOT
52881 + help
52882 + If you say Y here, the capabilities on all root processes within a
52883 + chroot jail will be lowered to stop module insertion, raw I/O,
52884 + system and net admin tasks, rebooting the system, modifying immutable
52885 + files, modifying IPC owned by another, and changing the system time.
52886 + This is left as an option because it can break some apps. Disable this
52887 + if your chrooted apps are having problems performing those kinds of
52888 + tasks. If the sysctl option is enabled, a sysctl option with
52889 + name "chroot_caps" is created.
52890 +
52891 +endmenu
52892 +menu "Kernel Auditing"
52893 +depends on GRKERNSEC
52894 +
52895 +config GRKERNSEC_AUDIT_GROUP
52896 + bool "Single group for auditing"
52897 + help
52898 + If you say Y here, the exec, chdir, and (un)mount logging features
52899 + will only operate on a group you specify. This option is recommended
52900 + if you only want to watch certain users instead of having a large
52901 + amount of logs from the entire system. If the sysctl option is enabled,
52902 + a sysctl option with name "audit_group" is created.
52903 +
52904 +config GRKERNSEC_AUDIT_GID
52905 + int "GID for auditing"
52906 + depends on GRKERNSEC_AUDIT_GROUP
52907 + default 1007
52908 +
52909 +config GRKERNSEC_EXECLOG
52910 + bool "Exec logging"
52911 + help
52912 + If you say Y here, all execve() calls will be logged (since the
52913 + other exec*() calls are frontends to execve(), all execution
52914 + will be logged). Useful for shell-servers that like to keep track
52915 + of their users. If the sysctl option is enabled, a sysctl option with
52916 + name "exec_logging" is created.
52917 + WARNING: This option when enabled will produce a LOT of logs, especially
52918 + on an active system.
52919 +
52920 +config GRKERNSEC_RESLOG
52921 + bool "Resource logging"
52922 + help
52923 + If you say Y here, all attempts to overstep resource limits will
52924 + be logged with the resource name, the requested size, and the current
52925 + limit. It is highly recommended that you say Y here. If the sysctl
52926 + option is enabled, a sysctl option with name "resource_logging" is
52927 + created. If the RBAC system is enabled, the sysctl value is ignored.
52928 +
52929 +config GRKERNSEC_CHROOT_EXECLOG
52930 + bool "Log execs within chroot"
52931 + help
52932 + If you say Y here, all executions inside a chroot jail will be logged
52933 + to syslog. This can cause a large amount of logs if certain
52934 + applications (e.g. djb's daemontools) are installed on the system, and
52935 + is therefore left as an option. If the sysctl option is enabled, a
52936 + sysctl option with name "chroot_execlog" is created.
52937 +
52938 +config GRKERNSEC_AUDIT_PTRACE
52939 + bool "Ptrace logging"
52940 + help
52941 + If you say Y here, all attempts to attach to a process via ptrace
52942 + will be logged. If the sysctl option is enabled, a sysctl option
52943 + with name "audit_ptrace" is created.
52944 +
52945 +config GRKERNSEC_AUDIT_CHDIR
52946 + bool "Chdir logging"
52947 + help
52948 + If you say Y here, all chdir() calls will be logged. If the sysctl
52949 + option is enabled, a sysctl option with name "audit_chdir" is created.
52950 +
52951 +config GRKERNSEC_AUDIT_MOUNT
52952 + bool "(Un)Mount logging"
52953 + help
52954 + If you say Y here, all mounts and unmounts will be logged. If the
52955 + sysctl option is enabled, a sysctl option with name "audit_mount" is
52956 + created.
52957 +
52958 +config GRKERNSEC_SIGNAL
52959 + bool "Signal logging"
52960 + help
52961 + If you say Y here, certain important signals will be logged, such as
52962 + SIGSEGV, which will inform you when an error in a program has
52963 + occurred, which in some cases could indicate a possible exploit attempt.
52964 + If the sysctl option is enabled, a sysctl option with name
52965 + "signal_logging" is created.
52966 +
52967 +config GRKERNSEC_FORKFAIL
52968 + bool "Fork failure logging"
52969 + help
52970 + If you say Y here, all failed fork() attempts will be logged.
52971 + This could suggest a fork bomb, or someone attempting to overstep
52972 + their process limit. If the sysctl option is enabled, a sysctl option
52973 + with name "forkfail_logging" is created.
52974 +
52975 +config GRKERNSEC_TIME
52976 + bool "Time change logging"
52977 + help
52978 + If you say Y here, any changes of the system clock will be logged.
52979 + If the sysctl option is enabled, a sysctl option with name
52980 + "timechange_logging" is created.
52981 +
52982 +config GRKERNSEC_PROC_IPADDR
52983 + bool "/proc/<pid>/ipaddr support"
52984 + help
52985 + If you say Y here, a new entry will be added to each /proc/<pid>
52986 + directory that contains the IP address of the person using the task.
52987 + The IP is carried across local TCP and AF_UNIX stream sockets.
52988 + This information can be useful for IDS/IPSes to perform remote response
52989 + to a local attack. The entry is readable by only the owner of the
52990 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
52991 + the RBAC system), and thus does not create privacy concerns.
52992 +
52993 +config GRKERNSEC_RWXMAP_LOG
52994 + bool 'Denied RWX mmap/mprotect logging'
52995 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
52996 + help
52997 + If you say Y here, calls to mmap() and mprotect() with explicit
52998 + usage of PROT_WRITE and PROT_EXEC together will be logged when
52999 + denied by the PAX_MPROTECT feature. If the sysctl option is
53000 + enabled, a sysctl option with name "rwxmap_logging" is created.
53001 +
53002 +config GRKERNSEC_AUDIT_TEXTREL
53003 + bool 'ELF text relocations logging (READ HELP)'
53004 + depends on PAX_MPROTECT
53005 + help
53006 + If you say Y here, text relocations will be logged with the filename
53007 + of the offending library or binary. The purpose of the feature is
53008 + to help Linux distribution developers get rid of libraries and
53009 + binaries that need text relocations which hinder the future progress
53010 + of PaX. Only Linux distribution developers should say Y here, and
53011 + never on a production machine, as this option creates an information
53012 + leak that could aid an attacker in defeating the randomization of
53013 + a single memory region. If the sysctl option is enabled, a sysctl
53014 + option with name "audit_textrel" is created.
53015 +
53016 +endmenu
53017 +
53018 +menu "Executable Protections"
53019 +depends on GRKERNSEC
53020 +
53021 +config GRKERNSEC_EXECVE
53022 + bool "Enforce RLIMIT_NPROC on execs"
53023 + help
53024 + If you say Y here, users with a resource limit on processes will
53025 + have the value checked during execve() calls. The current system
53026 + only checks the system limit during fork() calls. If the sysctl option
53027 + is enabled, a sysctl option with name "execve_limiting" is created.
53028 +
53029 +config GRKERNSEC_DMESG
53030 + bool "Dmesg(8) restriction"
53031 + help
53032 + If you say Y here, non-root users will not be able to use dmesg(8)
53033 + to view up to the last 4kb of messages in the kernel's log buffer.
53034 + The kernel's log buffer often contains kernel addresses and other
53035 + identifying information useful to an attacker in fingerprinting a
53036 + system for a targeted exploit.
53037 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53038 + created.
53039 +
53040 +config GRKERNSEC_HARDEN_PTRACE
53041 + bool "Deter ptrace-based process snooping"
53042 + help
53043 + If you say Y here, TTY sniffers and other malicious monitoring
53044 + programs implemented through ptrace will be defeated. If you
53045 + have been using the RBAC system, this option has already been
53046 + enabled for several years for all users, with the ability to make
53047 + fine-grained exceptions.
53048 +
53049 + This option only affects the ability of non-root users to ptrace
53050 + processes that are not a descendant of the ptracing process.
53051 + This means that strace ./binary and gdb ./binary will still work,
53052 + but attaching to arbitrary processes will not. If the sysctl
53053 + option is enabled, a sysctl option with name "harden_ptrace" is
53054 + created.
53055 +
53056 +config GRKERNSEC_TPE
53057 + bool "Trusted Path Execution (TPE)"
53058 + help
53059 + If you say Y here, you will be able to choose a gid to add to the
53060 + supplementary groups of users you want to mark as "untrusted."
53061 + These users will not be able to execute any files that are not in
53062 + root-owned directories writable only by root. If the sysctl option
53063 + is enabled, a sysctl option with name "tpe" is created.
53064 +
53065 +config GRKERNSEC_TPE_ALL
53066 + bool "Partially restrict all non-root users"
53067 + depends on GRKERNSEC_TPE
53068 + help
53069 + If you say Y here, all non-root users will be covered under
53070 + a weaker TPE restriction. This is separate from, and in addition to,
53071 + the main TPE options that you have selected elsewhere. Thus, if a
53072 + "trusted" GID is chosen, this restriction applies to even that GID.
53073 + Under this restriction, all non-root users will only be allowed to
53074 + execute files in directories they own that are not group or
53075 + world-writable, or in directories owned by root and writable only by
53076 + root. If the sysctl option is enabled, a sysctl option with name
53077 + "tpe_restrict_all" is created.
53078 +
53079 +config GRKERNSEC_TPE_INVERT
53080 + bool "Invert GID option"
53081 + depends on GRKERNSEC_TPE
53082 + help
53083 + If you say Y here, the group you specify in the TPE configuration will
53084 + decide what group TPE restrictions will be *disabled* for. This
53085 + option is useful if you want TPE restrictions to be applied to most
53086 + users on the system. If the sysctl option is enabled, a sysctl option
53087 + with name "tpe_invert" is created. Unlike other sysctl options, this
53088 + entry will default to on for backward-compatibility.
53089 +
53090 +config GRKERNSEC_TPE_GID
53091 + int "GID for untrusted users"
53092 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53093 + default 1005
53094 + help
53095 + Setting this GID determines what group TPE restrictions will be
53096 + *enabled* for. If the sysctl option is enabled, a sysctl option
53097 + with name "tpe_gid" is created.
53098 +
53099 +config GRKERNSEC_TPE_GID
53100 + int "GID for trusted users"
53101 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53102 + default 1005
53103 + help
53104 + Setting this GID determines what group TPE restrictions will be
53105 + *disabled* for. If the sysctl option is enabled, a sysctl option
53106 + with name "tpe_gid" is created.
53107 +
53108 +endmenu
53109 +menu "Network Protections"
53110 +depends on GRKERNSEC
53111 +
53112 +config GRKERNSEC_RANDNET
53113 + bool "Larger entropy pools"
53114 + help
53115 + If you say Y here, the entropy pools used for many features of Linux
53116 + and grsecurity will be doubled in size. Since several grsecurity
53117 + features use additional randomness, it is recommended that you say Y
53118 + here. Saying Y here has an effect similar to modifying
53119 + /proc/sys/kernel/random/poolsize.
53120 +
53121 +config GRKERNSEC_BLACKHOLE
53122 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53123 + help
53124 + If you say Y here, neither TCP resets nor ICMP
53125 + destination-unreachable packets will be sent in response to packets
53126 + sent to ports for which no associated listening process exists.
53127 + This feature supports both IPv4 and IPv6 and exempts the
53128 + loopback interface from blackholing. Enabling this feature
53129 + makes a host more resilient to DoS attacks and reduces network
53130 + visibility against scanners.
53131 +
53132 + The blackhole feature as-implemented is equivalent to the FreeBSD
53133 + blackhole feature, as it prevents RST responses to all packets, not
53134 + just SYNs. Under most application behavior this causes no
53135 + problems, but applications (like haproxy) may not close certain
53136 + connections in a way that cleanly terminates them on the remote
53137 + end, leaving the remote host in LAST_ACK state. Because of this
53138 + side-effect and to prevent intentional LAST_ACK DoSes, this
53139 + feature also adds automatic mitigation against such attacks.
53140 + The mitigation drastically reduces the amount of time a socket
53141 + can spend in LAST_ACK state. If you're using haproxy and not
53142 + all servers it connects to have this option enabled, consider
53143 + disabling this feature on the haproxy host.
53144 +
53145 + If the sysctl option is enabled, two sysctl options with names
53146 + "ip_blackhole" and "lastack_retries" will be created.
53147 + While "ip_blackhole" takes the standard zero/non-zero on/off
53148 + toggle, "lastack_retries" uses the same kinds of values as
53149 + "tcp_retries1" and "tcp_retries2". The default value of 4
53150 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53151 + state.
53152 +
53153 +config GRKERNSEC_SOCKET
53154 + bool "Socket restrictions"
53155 + help
53156 + If you say Y here, you will be able to choose from several options.
53157 + If you assign a GID on your system and add it to the supplementary
53158 + groups of users you want to restrict socket access to, this patch
53159 + will apply up to three restrictions, based on the option(s) you choose.
53160 +
53161 +config GRKERNSEC_SOCKET_ALL
53162 + bool "Deny any sockets to group"
53163 + depends on GRKERNSEC_SOCKET
53164 + help
53165 + If you say Y here, you will be able to choose a GID whose users will
53166 + be unable to connect to other hosts from your machine or run server
53167 + applications from your machine. If the sysctl option is enabled, a
53168 + sysctl option with name "socket_all" is created.
53169 +
53170 +config GRKERNSEC_SOCKET_ALL_GID
53171 + int "GID to deny all sockets for"
53172 + depends on GRKERNSEC_SOCKET_ALL
53173 + default 1004
53174 + help
53175 + Here you can choose the GID to disable socket access for. Remember to
53176 + add the users you want socket access disabled for to the GID
53177 + specified here. If the sysctl option is enabled, a sysctl option
53178 + with name "socket_all_gid" is created.
53179 +
53180 +config GRKERNSEC_SOCKET_CLIENT
53181 + bool "Deny client sockets to group"
53182 + depends on GRKERNSEC_SOCKET
53183 + help
53184 + If you say Y here, you will be able to choose a GID whose users will
53185 + be unable to connect to other hosts from your machine, but will be
53186 + able to run servers. If this option is enabled, all users in the group
53187 + you specify will have to use passive mode when initiating ftp transfers
53188 + from the shell on your machine. If the sysctl option is enabled, a
53189 + sysctl option with name "socket_client" is created.
53190 +
53191 +config GRKERNSEC_SOCKET_CLIENT_GID
53192 + int "GID to deny client sockets for"
53193 + depends on GRKERNSEC_SOCKET_CLIENT
53194 + default 1003
53195 + help
53196 + Here you can choose the GID to disable client socket access for.
53197 + Remember to add the users you want client socket access disabled for to
53198 + the GID specified here. If the sysctl option is enabled, a sysctl
53199 + option with name "socket_client_gid" is created.
53200 +
53201 +config GRKERNSEC_SOCKET_SERVER
53202 + bool "Deny server sockets to group"
53203 + depends on GRKERNSEC_SOCKET
53204 + help
53205 + If you say Y here, you will be able to choose a GID whose users will
53206 + be unable to run server applications from your machine. If the sysctl
53207 + option is enabled, a sysctl option with name "socket_server" is created.
53208 +
53209 +config GRKERNSEC_SOCKET_SERVER_GID
53210 + int "GID to deny server sockets for"
53211 + depends on GRKERNSEC_SOCKET_SERVER
53212 + default 1002
53213 + help
53214 + Here you can choose the GID to disable server socket access for.
53215 + Remember to add the users you want server socket access disabled for to
53216 + the GID specified here. If the sysctl option is enabled, a sysctl
53217 + option with name "socket_server_gid" is created.
53218 +
53219 +endmenu
53220 +menu "Sysctl support"
53221 +depends on GRKERNSEC && SYSCTL
53222 +
53223 +config GRKERNSEC_SYSCTL
53224 + bool "Sysctl support"
53225 + help
53226 + If you say Y here, you will be able to change the options that
53227 + grsecurity runs with at bootup, without having to recompile your
53228 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53229 + to enable (1) or disable (0) various features. All the sysctl entries
53230 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53231 + All features enabled in the kernel configuration are disabled at boot
53232 + if you do not say Y to the "Turn on features by default" option.
53233 + All options should be set at startup, and the grsec_lock entry should
53234 + be set to a non-zero value after all the options are set.
53235 + *THIS IS EXTREMELY IMPORTANT*
53236 +
53237 +config GRKERNSEC_SYSCTL_DISTRO
53238 + bool "Extra sysctl support for distro makers (READ HELP)"
53239 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53240 + help
53241 + If you say Y here, additional sysctl options will be created
53242 + for features that affect processes running as root. Therefore,
53243 + it is critical when using this option that the grsec_lock entry be
53244 + enabled after boot. Only distros that ship prebuilt kernel packages
53245 + with this option enabled and that can ensure grsec_lock is set
53246 + after boot should use this option.
53247 + *Failure to set grsec_lock after boot makes all grsec features
53248 + this option covers useless*
53249 +
53250 + Currently this option creates the following sysctl entries:
53251 + "Disable Privileged I/O": "disable_priv_io"
53252 +
53253 +config GRKERNSEC_SYSCTL_ON
53254 + bool "Turn on features by default"
53255 + depends on GRKERNSEC_SYSCTL
53256 + help
53257 + If you say Y here, the features enabled in the kernel configuration
53258 + will also be enabled at boot time, rather than starting out
53259 + disabled. It is recommended you say Y here unless
53260 + there is some reason you would want all sysctl-tunable features to
53261 + be disabled by default. As mentioned elsewhere, it is important
53262 + to enable the grsec_lock entry once you have finished modifying
53263 + the sysctl entries.
53264 +
53265 +endmenu
53266 +menu "Logging Options"
53267 +depends on GRKERNSEC
53268 +
53269 +config GRKERNSEC_FLOODTIME
53270 + int "Seconds in between log messages (minimum)"
53271 + default 10
53272 + help
53273 + This option allows you to enforce a minimum number of seconds between
53274 + grsecurity log messages. The default should be suitable for most
53275 + people; however, if you choose to change it, choose a value small enough
53276 + to allow informative logs to be produced, but large enough to
53277 + prevent flooding.
53278 +
53279 +config GRKERNSEC_FLOODBURST
53280 + int "Number of messages in a burst (maximum)"
53281 + default 4
53282 + help
53283 + This option allows you to choose the maximum number of messages allowed
53284 + within the flood time interval you chose in a separate option. The
53285 + default should be suitable for most people; however, if you find that
53286 + many of your logs are being interpreted as flooding, you may want to
53287 + raise this value.
53288 +
53289 +endmenu
53290 +
53291 +endmenu
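
The sysctl help text above boils down to a simple boot-time sequence: enable the features you want under /proc/sys/kernel/grsecurity, then set grsec_lock so nothing can be changed afterwards. As an illustration only (not part of the patch), a minimal helper might look like the sketch below; the "dmesg", "harden_ptrace" and "grsec_lock" entry names are taken from the help text above, the exact paths and everything else are assumptions.

/* Sketch: enable a couple of grsecurity sysctls at boot, then lock them.
 * Paths and entry names are assumed from the Kconfig help text above. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", value);
	return fclose(f);
}

int main(void)
{
	/* Individual features toggle with 0/1 while grsec_lock is zero. */
	write_sysctl("/proc/sys/kernel/grsecurity/dmesg", "1");
	write_sysctl("/proc/sys/kernel/grsecurity/harden_ptrace", "1");

	/* Once grsec_lock is non-zero, all grsecurity entries become
	 * immutable, which is why the help text insists it be set last. */
	return write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
}
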
53292 diff -urNp linux-2.6.32.41/grsecurity/Makefile linux-2.6.32.41/grsecurity/Makefile
53293 --- linux-2.6.32.41/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53294 +++ linux-2.6.32.41/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53295 @@ -0,0 +1,33 @@
53296 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton;
53297 +# during 2001-2009 it was completely redesigned by Brad Spengler
53298 +# into an RBAC system.
53299 +#
53300 +# All code in this directory and various hooks inserted throughout the kernel
53301 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53302 +# under the GPL v2 or higher
53303 +
53304 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53305 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53306 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53307 +
53308 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53309 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53310 + gracl_learn.o grsec_log.o
53311 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53312 +
53313 +ifdef CONFIG_NET
53314 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53315 +endif
53316 +
53317 +ifndef CONFIG_GRKERNSEC
53318 +obj-y += grsec_disabled.o
53319 +endif
53320 +
53321 +ifdef CONFIG_GRKERNSEC_HIDESYM
53322 +extra-y := grsec_hidesym.o
53323 +$(obj)/grsec_hidesym.o:
53324 + @-chmod -f 500 /boot
53325 + @-chmod -f 500 /lib/modules
53326 + @-chmod -f 700 .
53327 + @echo ' grsec: protected kernel image paths'
53328 +endif
53329 diff -urNp linux-2.6.32.41/include/acpi/acpi_drivers.h linux-2.6.32.41/include/acpi/acpi_drivers.h
53330 --- linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53331 +++ linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53332 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53333 Dock Station
53334 -------------------------------------------------------------------------- */
53335 struct acpi_dock_ops {
53336 - acpi_notify_handler handler;
53337 - acpi_notify_handler uevent;
53338 + const acpi_notify_handler handler;
53339 + const acpi_notify_handler uevent;
53340 };
53341
53342 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53343 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53344 extern int register_dock_notifier(struct notifier_block *nb);
53345 extern void unregister_dock_notifier(struct notifier_block *nb);
53346 extern int register_hotplug_dock_device(acpi_handle handle,
53347 - struct acpi_dock_ops *ops,
53348 + const struct acpi_dock_ops *ops,
53349 void *context);
53350 extern void unregister_hotplug_dock_device(acpi_handle handle);
53351 #else
53352 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53353 {
53354 }
53355 static inline int register_hotplug_dock_device(acpi_handle handle,
53356 - struct acpi_dock_ops *ops,
53357 + const struct acpi_dock_ops *ops,
53358 void *context)
53359 {
53360 return -ENODEV;
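
The acpi_dock_ops hunk above is one instance of a pattern this patch applies throughout the tree: callback tables are const-qualified so they can live in read-only data instead of remaining writable function-pointer targets. Under the patched prototype a caller simply declares its table const; in the sketch below every my_ name is hypothetical, only register_hotplug_dock_device() and struct acpi_dock_ops come from the kernel.

/* Sketch: a driver-side dock ops table under the const-ified prototype. */
#include <linux/acpi.h>

static void my_dock_notify(acpi_handle handle, u32 event, void *context)
{
	/* react to dock/undock notifications for this device */
}

static const struct acpi_dock_ops my_dock_ops = {
	.handler = my_dock_notify,
	.uevent  = my_dock_notify,	/* reuse the same notifier here */
};

static int my_register_dock(acpi_handle handle, void *context)
{
	/* The const table is accepted as-is, so the compiler is free to
	 * place it in a read-only section. */
	return register_hotplug_dock_device(handle, &my_dock_ops, context);
}
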
53361 diff -urNp linux-2.6.32.41/include/asm-generic/atomic-long.h linux-2.6.32.41/include/asm-generic/atomic-long.h
53362 --- linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53363 +++ linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53364 @@ -22,6 +22,12 @@
53365
53366 typedef atomic64_t atomic_long_t;
53367
53368 +#ifdef CONFIG_PAX_REFCOUNT
53369 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53370 +#else
53371 +typedef atomic64_t atomic_long_unchecked_t;
53372 +#endif
53373 +
53374 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53375
53376 static inline long atomic_long_read(atomic_long_t *l)
53377 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53378 return (long)atomic64_read(v);
53379 }
53380
53381 +#ifdef CONFIG_PAX_REFCOUNT
53382 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53383 +{
53384 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53385 +
53386 + return (long)atomic64_read_unchecked(v);
53387 +}
53388 +#endif
53389 +
53390 static inline void atomic_long_set(atomic_long_t *l, long i)
53391 {
53392 atomic64_t *v = (atomic64_t *)l;
53393 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53394 atomic64_set(v, i);
53395 }
53396
53397 +#ifdef CONFIG_PAX_REFCOUNT
53398 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53399 +{
53400 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53401 +
53402 + atomic64_set_unchecked(v, i);
53403 +}
53404 +#endif
53405 +
53406 static inline void atomic_long_inc(atomic_long_t *l)
53407 {
53408 atomic64_t *v = (atomic64_t *)l;
53409 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53410 atomic64_inc(v);
53411 }
53412
53413 +#ifdef CONFIG_PAX_REFCOUNT
53414 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53415 +{
53416 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53417 +
53418 + atomic64_inc_unchecked(v);
53419 +}
53420 +#endif
53421 +
53422 static inline void atomic_long_dec(atomic_long_t *l)
53423 {
53424 atomic64_t *v = (atomic64_t *)l;
53425 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53426 atomic64_dec(v);
53427 }
53428
53429 +#ifdef CONFIG_PAX_REFCOUNT
53430 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53431 +{
53432 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53433 +
53434 + atomic64_dec_unchecked(v);
53435 +}
53436 +#endif
53437 +
53438 static inline void atomic_long_add(long i, atomic_long_t *l)
53439 {
53440 atomic64_t *v = (atomic64_t *)l;
53441 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53442 atomic64_add(i, v);
53443 }
53444
53445 +#ifdef CONFIG_PAX_REFCOUNT
53446 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53447 +{
53448 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53449 +
53450 + atomic64_add_unchecked(i, v);
53451 +}
53452 +#endif
53453 +
53454 static inline void atomic_long_sub(long i, atomic_long_t *l)
53455 {
53456 atomic64_t *v = (atomic64_t *)l;
53457 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53458 return (long)atomic64_inc_return(v);
53459 }
53460
53461 +#ifdef CONFIG_PAX_REFCOUNT
53462 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53463 +{
53464 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53465 +
53466 + return (long)atomic64_inc_return_unchecked(v);
53467 +}
53468 +#endif
53469 +
53470 static inline long atomic_long_dec_return(atomic_long_t *l)
53471 {
53472 atomic64_t *v = (atomic64_t *)l;
53473 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53474
53475 typedef atomic_t atomic_long_t;
53476
53477 +#ifdef CONFIG_PAX_REFCOUNT
53478 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53479 +#else
53480 +typedef atomic_t atomic_long_unchecked_t;
53481 +#endif
53482 +
53483 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53484 static inline long atomic_long_read(atomic_long_t *l)
53485 {
53486 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53487 return (long)atomic_read(v);
53488 }
53489
53490 +#ifdef CONFIG_PAX_REFCOUNT
53491 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53492 +{
53493 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53494 +
53495 + return (long)atomic_read_unchecked(v);
53496 +}
53497 +#endif
53498 +
53499 static inline void atomic_long_set(atomic_long_t *l, long i)
53500 {
53501 atomic_t *v = (atomic_t *)l;
53502 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53503 atomic_set(v, i);
53504 }
53505
53506 +#ifdef CONFIG_PAX_REFCOUNT
53507 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53508 +{
53509 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53510 +
53511 + atomic_set_unchecked(v, i);
53512 +}
53513 +#endif
53514 +
53515 static inline void atomic_long_inc(atomic_long_t *l)
53516 {
53517 atomic_t *v = (atomic_t *)l;
53518 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53519 atomic_inc(v);
53520 }
53521
53522 +#ifdef CONFIG_PAX_REFCOUNT
53523 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53524 +{
53525 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53526 +
53527 + atomic_inc_unchecked(v);
53528 +}
53529 +#endif
53530 +
53531 static inline void atomic_long_dec(atomic_long_t *l)
53532 {
53533 atomic_t *v = (atomic_t *)l;
53534 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53535 atomic_dec(v);
53536 }
53537
53538 +#ifdef CONFIG_PAX_REFCOUNT
53539 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53540 +{
53541 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53542 +
53543 + atomic_dec_unchecked(v);
53544 +}
53545 +#endif
53546 +
53547 static inline void atomic_long_add(long i, atomic_long_t *l)
53548 {
53549 atomic_t *v = (atomic_t *)l;
53550 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53551 atomic_add(i, v);
53552 }
53553
53554 +#ifdef CONFIG_PAX_REFCOUNT
53555 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53556 +{
53557 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53558 +
53559 + atomic_add_unchecked(i, v);
53560 +}
53561 +#endif
53562 +
53563 static inline void atomic_long_sub(long i, atomic_long_t *l)
53564 {
53565 atomic_t *v = (atomic_t *)l;
53566 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53567 return (long)atomic_inc_return(v);
53568 }
53569
53570 +#ifdef CONFIG_PAX_REFCOUNT
53571 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53572 +{
53573 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53574 +
53575 + return (long)atomic_inc_return_unchecked(v);
53576 +}
53577 +#endif
53578 +
53579 static inline long atomic_long_dec_return(atomic_long_t *l)
53580 {
53581 atomic_t *v = (atomic_t *)l;
53582 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53583
53584 #endif /* BITS_PER_LONG == 64 */
53585
53586 +#ifdef CONFIG_PAX_REFCOUNT
53587 +static inline void pax_refcount_needs_these_functions(void)
53588 +{
53589 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53590 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53591 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53592 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53593 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53594 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53595 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53596 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53597 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53598 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53599 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53600 +
53601 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53602 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53603 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53604 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53605 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53606 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53607 +}
53608 +#else
53609 +#define atomic_read_unchecked(v) atomic_read(v)
53610 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53611 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53612 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53613 +#define atomic_inc_unchecked(v) atomic_inc(v)
53614 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53615 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53616 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53617 +#define atomic_dec_unchecked(v) atomic_dec(v)
53618 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53619 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53620 +
53621 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53622 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53623 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53624 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53625 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53626 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53627 +#endif
53628 +
53629 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
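
The *_unchecked types and helpers added above exist so that, with PAX_REFCOUNT enabled, overflow detection stays armed on real reference counts while counters whose wraparound is harmless (statistics, debug IDs) opt out explicitly; without PAX_REFCOUNT the fallback macros make both spellings identical. A sketch of the intended split, where struct my_conn and its fields are invented names:

/* Sketch: checked vs. unchecked atomics under CONFIG_PAX_REFCOUNT. */
#include <asm/atomic.h>

struct my_conn {
	atomic_t refcnt;		/* real refcount: overflow must trap */
	atomic_unchecked_t rx_errors;	/* statistic: wraparound is harmless */
};

static void my_conn_note_error(struct my_conn *c)
{
	/* Overflowing an error counter is not exploitable, so it is
	 * explicitly exempted from the refcount overflow check. */
	atomic_inc_unchecked(&c->rx_errors);
}

static void my_conn_get(struct my_conn *c)
{
	/* A genuine reference count stays on atomic_t, so forcing it to
	 * wrap trips the PAX_REFCOUNT detection instead of silently
	 * producing a premature free. */
	atomic_inc(&c->refcnt);
}
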
53630 diff -urNp linux-2.6.32.41/include/asm-generic/cache.h linux-2.6.32.41/include/asm-generic/cache.h
53631 --- linux-2.6.32.41/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53632 +++ linux-2.6.32.41/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53633 @@ -6,7 +6,7 @@
53634 * cache lines need to provide their own cache.h.
53635 */
53636
53637 -#define L1_CACHE_SHIFT 5
53638 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53639 +#define L1_CACHE_SHIFT 5U
53640 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53641
53642 #endif /* __ASM_GENERIC_CACHE_H */
53643 diff -urNp linux-2.6.32.41/include/asm-generic/dma-mapping-common.h linux-2.6.32.41/include/asm-generic/dma-mapping-common.h
53644 --- linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53645 +++ linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53646 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53647 enum dma_data_direction dir,
53648 struct dma_attrs *attrs)
53649 {
53650 - struct dma_map_ops *ops = get_dma_ops(dev);
53651 + const struct dma_map_ops *ops = get_dma_ops(dev);
53652 dma_addr_t addr;
53653
53654 kmemcheck_mark_initialized(ptr, size);
53655 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53656 enum dma_data_direction dir,
53657 struct dma_attrs *attrs)
53658 {
53659 - struct dma_map_ops *ops = get_dma_ops(dev);
53660 + const struct dma_map_ops *ops = get_dma_ops(dev);
53661
53662 BUG_ON(!valid_dma_direction(dir));
53663 if (ops->unmap_page)
53664 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53665 int nents, enum dma_data_direction dir,
53666 struct dma_attrs *attrs)
53667 {
53668 - struct dma_map_ops *ops = get_dma_ops(dev);
53669 + const struct dma_map_ops *ops = get_dma_ops(dev);
53670 int i, ents;
53671 struct scatterlist *s;
53672
53673 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53674 int nents, enum dma_data_direction dir,
53675 struct dma_attrs *attrs)
53676 {
53677 - struct dma_map_ops *ops = get_dma_ops(dev);
53678 + const struct dma_map_ops *ops = get_dma_ops(dev);
53679
53680 BUG_ON(!valid_dma_direction(dir));
53681 debug_dma_unmap_sg(dev, sg, nents, dir);
53682 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53683 size_t offset, size_t size,
53684 enum dma_data_direction dir)
53685 {
53686 - struct dma_map_ops *ops = get_dma_ops(dev);
53687 + const struct dma_map_ops *ops = get_dma_ops(dev);
53688 dma_addr_t addr;
53689
53690 kmemcheck_mark_initialized(page_address(page) + offset, size);
53691 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53692 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53693 size_t size, enum dma_data_direction dir)
53694 {
53695 - struct dma_map_ops *ops = get_dma_ops(dev);
53696 + const struct dma_map_ops *ops = get_dma_ops(dev);
53697
53698 BUG_ON(!valid_dma_direction(dir));
53699 if (ops->unmap_page)
53700 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53701 size_t size,
53702 enum dma_data_direction dir)
53703 {
53704 - struct dma_map_ops *ops = get_dma_ops(dev);
53705 + const struct dma_map_ops *ops = get_dma_ops(dev);
53706
53707 BUG_ON(!valid_dma_direction(dir));
53708 if (ops->sync_single_for_cpu)
53709 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53710 dma_addr_t addr, size_t size,
53711 enum dma_data_direction dir)
53712 {
53713 - struct dma_map_ops *ops = get_dma_ops(dev);
53714 + const struct dma_map_ops *ops = get_dma_ops(dev);
53715
53716 BUG_ON(!valid_dma_direction(dir));
53717 if (ops->sync_single_for_device)
53718 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53719 size_t size,
53720 enum dma_data_direction dir)
53721 {
53722 - struct dma_map_ops *ops = get_dma_ops(dev);
53723 + const struct dma_map_ops *ops = get_dma_ops(dev);
53724
53725 BUG_ON(!valid_dma_direction(dir));
53726 if (ops->sync_single_range_for_cpu) {
53727 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53728 size_t size,
53729 enum dma_data_direction dir)
53730 {
53731 - struct dma_map_ops *ops = get_dma_ops(dev);
53732 + const struct dma_map_ops *ops = get_dma_ops(dev);
53733
53734 BUG_ON(!valid_dma_direction(dir));
53735 if (ops->sync_single_range_for_device) {
53736 @@ -155,7 +155,7 @@ static inline void
53737 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53738 int nelems, enum dma_data_direction dir)
53739 {
53740 - struct dma_map_ops *ops = get_dma_ops(dev);
53741 + const struct dma_map_ops *ops = get_dma_ops(dev);
53742
53743 BUG_ON(!valid_dma_direction(dir));
53744 if (ops->sync_sg_for_cpu)
53745 @@ -167,7 +167,7 @@ static inline void
53746 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53747 int nelems, enum dma_data_direction dir)
53748 {
53749 - struct dma_map_ops *ops = get_dma_ops(dev);
53750 + const struct dma_map_ops *ops = get_dma_ops(dev);
53751
53752 BUG_ON(!valid_dma_direction(dir));
53753 if (ops->sync_sg_for_device)
53754 diff -urNp linux-2.6.32.41/include/asm-generic/futex.h linux-2.6.32.41/include/asm-generic/futex.h
53755 --- linux-2.6.32.41/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53756 +++ linux-2.6.32.41/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53757 @@ -6,7 +6,7 @@
53758 #include <asm/errno.h>
53759
53760 static inline int
53761 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53762 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53763 {
53764 int op = (encoded_op >> 28) & 7;
53765 int cmp = (encoded_op >> 24) & 15;
53766 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53767 }
53768
53769 static inline int
53770 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53771 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53772 {
53773 return -ENOSYS;
53774 }
53775 diff -urNp linux-2.6.32.41/include/asm-generic/int-l64.h linux-2.6.32.41/include/asm-generic/int-l64.h
53776 --- linux-2.6.32.41/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53777 +++ linux-2.6.32.41/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53778 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53779 typedef signed long s64;
53780 typedef unsigned long u64;
53781
53782 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53783 +
53784 #define S8_C(x) x
53785 #define U8_C(x) x ## U
53786 #define S16_C(x) x
53787 diff -urNp linux-2.6.32.41/include/asm-generic/int-ll64.h linux-2.6.32.41/include/asm-generic/int-ll64.h
53788 --- linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
53789 +++ linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
53790 @@ -51,6 +51,8 @@ typedef unsigned int u32;
53791 typedef signed long long s64;
53792 typedef unsigned long long u64;
53793
53794 +typedef unsigned long long intoverflow_t;
53795 +
53796 #define S8_C(x) x
53797 #define U8_C(x) x ## U
53798 #define S16_C(x) x
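
intoverflow_t is simply an integer type wider than unsigned long (128-bit via mode(TI) on the l64 variant, unsigned long long on ll64), so that size arithmetic can be checked for overflow before it is truncated. The patch uses it for allocation-size checks elsewhere; the helper below is a hypothetical sketch of that idea, and only the intoverflow_t type itself comes from these hunks.

/* Sketch: rejecting n * size overflow by doing the math in a wider type. */
#include <linux/kernel.h>
#include <linux/slab.h>

static void *my_alloc_array(size_t n, size_t size)
{
	/* The product is computed in intoverflow_t; if it no longer fits
	 * in an unsigned long, the request must have overflowed. */
	if ((intoverflow_t)n * size > ULONG_MAX)
		return NULL;

	return kmalloc(n * size, GFP_KERNEL);
}
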
53799 diff -urNp linux-2.6.32.41/include/asm-generic/kmap_types.h linux-2.6.32.41/include/asm-generic/kmap_types.h
53800 --- linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
53801 +++ linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
53802 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
53803 KMAP_D(16) KM_IRQ_PTE,
53804 KMAP_D(17) KM_NMI,
53805 KMAP_D(18) KM_NMI_PTE,
53806 -KMAP_D(19) KM_TYPE_NR
53807 +KMAP_D(19) KM_CLEARPAGE,
53808 +KMAP_D(20) KM_TYPE_NR
53809 };
53810
53811 #undef KMAP_D
53812 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable.h linux-2.6.32.41/include/asm-generic/pgtable.h
53813 --- linux-2.6.32.41/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
53814 +++ linux-2.6.32.41/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
53815 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
53816 unsigned long size);
53817 #endif
53818
53819 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
53820 +static inline unsigned long pax_open_kernel(void) { return 0; }
53821 +#endif
53822 +
53823 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
53824 +static inline unsigned long pax_close_kernel(void) { return 0; }
53825 +#endif
53826 +
53827 #endif /* !__ASSEMBLY__ */
53828
53829 #endif /* _ASM_GENERIC_PGTABLE_H */
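
The two stubs above give architectures without a PaX implementation a no-op version of the open/close pair that KERNEXEC-aware code uses to bracket writes to otherwise read-only kernel data. Usage looks like the hypothetical helper below; only pax_open_kernel() and pax_close_kernel() come from the patch.

/* Sketch: temporarily lifting kernel write protection around a one-off
 * update of data that normally sits in read-only memory. */
#include <asm/pgtable.h>

static void my_patch_slot(unsigned long *slot, unsigned long value)
{
	pax_open_kernel();	/* allow the write; a no-op without PaX */
	*slot = value;
	pax_close_kernel();	/* restore the write protection */
}
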
53830 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h
53831 --- linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
53832 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
53833 @@ -1,14 +1,19 @@
53834 #ifndef _PGTABLE_NOPMD_H
53835 #define _PGTABLE_NOPMD_H
53836
53837 -#ifndef __ASSEMBLY__
53838 -
53839 #include <asm-generic/pgtable-nopud.h>
53840
53841 -struct mm_struct;
53842 -
53843 #define __PAGETABLE_PMD_FOLDED
53844
53845 +#define PMD_SHIFT PUD_SHIFT
53846 +#define PTRS_PER_PMD 1
53847 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
53848 +#define PMD_MASK (~(PMD_SIZE-1))
53849 +
53850 +#ifndef __ASSEMBLY__
53851 +
53852 +struct mm_struct;
53853 +
53854 /*
53855 * Having the pmd type consist of a pud gets the size right, and allows
53856 * us to conceptually access the pud entry that this pmd is folded into
53857 @@ -16,11 +21,6 @@ struct mm_struct;
53858 */
53859 typedef struct { pud_t pud; } pmd_t;
53860
53861 -#define PMD_SHIFT PUD_SHIFT
53862 -#define PTRS_PER_PMD 1
53863 -#define PMD_SIZE (1UL << PMD_SHIFT)
53864 -#define PMD_MASK (~(PMD_SIZE-1))
53865 -
53866 /*
53867 * The "pud_xxx()" functions here are trivial for a folded two-level
53868 * setup: the pmd is never bad, and a pmd always exists (as it's folded
53869 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopud.h linux-2.6.32.41/include/asm-generic/pgtable-nopud.h
53870 --- linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
53871 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
53872 @@ -1,10 +1,15 @@
53873 #ifndef _PGTABLE_NOPUD_H
53874 #define _PGTABLE_NOPUD_H
53875
53876 -#ifndef __ASSEMBLY__
53877 -
53878 #define __PAGETABLE_PUD_FOLDED
53879
53880 +#define PUD_SHIFT PGDIR_SHIFT
53881 +#define PTRS_PER_PUD 1
53882 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
53883 +#define PUD_MASK (~(PUD_SIZE-1))
53884 +
53885 +#ifndef __ASSEMBLY__
53886 +
53887 /*
53888 * Having the pud type consist of a pgd gets the size right, and allows
53889 * us to conceptually access the pgd entry that this pud is folded into
53890 @@ -12,11 +17,6 @@
53891 */
53892 typedef struct { pgd_t pgd; } pud_t;
53893
53894 -#define PUD_SHIFT PGDIR_SHIFT
53895 -#define PTRS_PER_PUD 1
53896 -#define PUD_SIZE (1UL << PUD_SHIFT)
53897 -#define PUD_MASK (~(PUD_SIZE-1))
53898 -
53899 /*
53900 * The "pgd_xxx()" functions here are trivial for a folded two-level
53901 * setup: the pud is never bad, and a pud always exists (as it's folded
53902 diff -urNp linux-2.6.32.41/include/asm-generic/vmlinux.lds.h linux-2.6.32.41/include/asm-generic/vmlinux.lds.h
53903 --- linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
53904 +++ linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
53905 @@ -199,6 +199,7 @@
53906 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
53907 VMLINUX_SYMBOL(__start_rodata) = .; \
53908 *(.rodata) *(.rodata.*) \
53909 + *(.data.read_only) \
53910 *(__vermagic) /* Kernel version magic */ \
53911 *(__markers_strings) /* Markers: strings */ \
53912 *(__tracepoints_strings)/* Tracepoints: strings */ \
53913 @@ -656,22 +657,24 @@
53914 * section in the linker script will go there too. @phdr should have
53915 * a leading colon.
53916 *
53917 - * Note that this macros defines __per_cpu_load as an absolute symbol.
53918 + * Note that this macro defines per_cpu_load as an absolute symbol.
53919 * If there is no need to put the percpu section at a predetermined
53920 * address, use PERCPU().
53921 */
53922 #define PERCPU_VADDR(vaddr, phdr) \
53923 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
53924 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
53925 + per_cpu_load = .; \
53926 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
53927 - LOAD_OFFSET) { \
53928 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
53929 VMLINUX_SYMBOL(__per_cpu_start) = .; \
53930 *(.data.percpu.first) \
53931 - *(.data.percpu.page_aligned) \
53932 *(.data.percpu) \
53933 + . = ALIGN(PAGE_SIZE); \
53934 + *(.data.percpu.page_aligned) \
53935 *(.data.percpu.shared_aligned) \
53936 VMLINUX_SYMBOL(__per_cpu_end) = .; \
53937 } phdr \
53938 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
53939 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
53940
53941 /**
53942 * PERCPU - define output section for percpu area, simple version
53943 diff -urNp linux-2.6.32.41/include/drm/drmP.h linux-2.6.32.41/include/drm/drmP.h
53944 --- linux-2.6.32.41/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
53945 +++ linux-2.6.32.41/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
53946 @@ -71,6 +71,7 @@
53947 #include <linux/workqueue.h>
53948 #include <linux/poll.h>
53949 #include <asm/pgalloc.h>
53950 +#include <asm/local.h>
53951 #include "drm.h"
53952
53953 #include <linux/idr.h>
53954 @@ -814,7 +815,7 @@ struct drm_driver {
53955 void (*vgaarb_irq)(struct drm_device *dev, bool state);
53956
53957 /* Driver private ops for this object */
53958 - struct vm_operations_struct *gem_vm_ops;
53959 + const struct vm_operations_struct *gem_vm_ops;
53960
53961 int major;
53962 int minor;
53963 @@ -917,7 +918,7 @@ struct drm_device {
53964
53965 /** \name Usage Counters */
53966 /*@{ */
53967 - int open_count; /**< Outstanding files open */
53968 + local_t open_count; /**< Outstanding files open */
53969 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
53970 atomic_t vma_count; /**< Outstanding vma areas open */
53971 int buf_use; /**< Buffers in use -- cannot alloc */
53972 @@ -928,7 +929,7 @@ struct drm_device {
53973 /*@{ */
53974 unsigned long counters;
53975 enum drm_stat_type types[15];
53976 - atomic_t counts[15];
53977 + atomic_unchecked_t counts[15];
53978 /*@} */
53979
53980 struct list_head filelist;
53981 @@ -1016,7 +1017,7 @@ struct drm_device {
53982 struct pci_controller *hose;
53983 #endif
53984 struct drm_sg_mem *sg; /**< Scatter gather memory */
53985 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
53986 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
53987 void *dev_private; /**< device private data */
53988 void *mm_private;
53989 struct address_space *dev_mapping;
53990 @@ -1042,11 +1043,11 @@ struct drm_device {
53991 spinlock_t object_name_lock;
53992 struct idr object_name_idr;
53993 atomic_t object_count;
53994 - atomic_t object_memory;
53995 + atomic_unchecked_t object_memory;
53996 atomic_t pin_count;
53997 - atomic_t pin_memory;
53998 + atomic_unchecked_t pin_memory;
53999 atomic_t gtt_count;
54000 - atomic_t gtt_memory;
54001 + atomic_unchecked_t gtt_memory;
54002 uint32_t gtt_total;
54003 uint32_t invalidate_domains; /* domains pending invalidation */
54004 uint32_t flush_domains; /* domains pending flush */
54005 diff -urNp linux-2.6.32.41/include/linux/a.out.h linux-2.6.32.41/include/linux/a.out.h
54006 --- linux-2.6.32.41/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54007 +++ linux-2.6.32.41/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54008 @@ -39,6 +39,14 @@ enum machine_type {
54009 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54010 };
54011
54012 +/* Constants for the N_FLAGS field */
54013 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54014 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54015 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54016 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54017 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54018 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54019 +
54020 #if !defined (N_MAGIC)
54021 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54022 #endif
54023 diff -urNp linux-2.6.32.41/include/linux/atmdev.h linux-2.6.32.41/include/linux/atmdev.h
54024 --- linux-2.6.32.41/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54025 +++ linux-2.6.32.41/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54026 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54027 #endif
54028
54029 struct k_atm_aal_stats {
54030 -#define __HANDLE_ITEM(i) atomic_t i
54031 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54032 __AAL_STAT_ITEMS
54033 #undef __HANDLE_ITEM
54034 };
54035 diff -urNp linux-2.6.32.41/include/linux/backlight.h linux-2.6.32.41/include/linux/backlight.h
54036 --- linux-2.6.32.41/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54037 +++ linux-2.6.32.41/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54038 @@ -36,18 +36,18 @@ struct backlight_device;
54039 struct fb_info;
54040
54041 struct backlight_ops {
54042 - unsigned int options;
54043 + const unsigned int options;
54044
54045 #define BL_CORE_SUSPENDRESUME (1 << 0)
54046
54047 /* Notify the backlight driver some property has changed */
54048 - int (*update_status)(struct backlight_device *);
54049 + int (* const update_status)(struct backlight_device *);
54050 /* Return the current backlight brightness (accounting for power,
54051 fb_blank etc.) */
54052 - int (*get_brightness)(struct backlight_device *);
54053 + int (* const get_brightness)(struct backlight_device *);
54054 /* Check if given framebuffer device is the one bound to this backlight;
54055 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54056 - int (*check_fb)(struct fb_info *);
54057 + int (* const check_fb)(struct fb_info *);
54058 };
54059
54060 /* This structure defines all the properties of a backlight */
54061 @@ -86,7 +86,7 @@ struct backlight_device {
54062 registered this device has been unloaded, and if class_get_devdata()
54063 points to something in the body of that driver, it is also invalid. */
54064 struct mutex ops_lock;
54065 - struct backlight_ops *ops;
54066 + const struct backlight_ops *ops;
54067
54068 /* The framebuffer notifier block */
54069 struct notifier_block fb_notif;
54070 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54071 }
54072
54073 extern struct backlight_device *backlight_device_register(const char *name,
54074 - struct device *dev, void *devdata, struct backlight_ops *ops);
54075 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54076 extern void backlight_device_unregister(struct backlight_device *bd);
54077 extern void backlight_force_update(struct backlight_device *bd,
54078 enum backlight_update_reason reason);
54079 diff -urNp linux-2.6.32.41/include/linux/binfmts.h linux-2.6.32.41/include/linux/binfmts.h
54080 --- linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54081 +++ linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54082 @@ -83,6 +83,7 @@ struct linux_binfmt {
54083 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54084 int (*load_shlib)(struct file *);
54085 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54086 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54087 unsigned long min_coredump; /* minimal dump size */
54088 int hasvdso;
54089 };
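
handle_mprotect is a new optional hook: when a binary-format handler provides it, the handler gets a look at mprotect() calls that change the protection of mappings it set up (the ELF handler uses this elsewhere in the patch, e.g. for text relocations under MPROTECT). A hypothetical format wiring it up could look like the sketch below; every my_ name is invented.

/* Sketch: a binary format providing the new handle_mprotect hook. */
#include <linux/binfmts.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm_types.h>

static int my_load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	return -ENOEXEC;		/* this sketch never claims a binary */
}

static void my_handle_mprotect(struct vm_area_struct *vma,
			       unsigned long newflags)
{
	/* Inspect or adjust the protection change for mappings that this
	 * format set up during my_load_binary(). */
}

static struct linux_binfmt my_binfmt = {
	.module		 = THIS_MODULE,
	.load_binary	 = my_load_binary,
	.handle_mprotect = my_handle_mprotect,
};
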
54090 diff -urNp linux-2.6.32.41/include/linux/blkdev.h linux-2.6.32.41/include/linux/blkdev.h
54091 --- linux-2.6.32.41/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54092 +++ linux-2.6.32.41/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54093 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54094 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54095
54096 struct block_device_operations {
54097 - int (*open) (struct block_device *, fmode_t);
54098 - int (*release) (struct gendisk *, fmode_t);
54099 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54100 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54101 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54102 - int (*direct_access) (struct block_device *, sector_t,
54103 + int (* const open) (struct block_device *, fmode_t);
54104 + int (* const release) (struct gendisk *, fmode_t);
54105 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54106 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54107 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54108 + int (* const direct_access) (struct block_device *, sector_t,
54109 void **, unsigned long *);
54110 - int (*media_changed) (struct gendisk *);
54111 - unsigned long long (*set_capacity) (struct gendisk *,
54112 + int (* const media_changed) (struct gendisk *);
54113 + unsigned long long (* const set_capacity) (struct gendisk *,
54114 unsigned long long);
54115 - int (*revalidate_disk) (struct gendisk *);
54116 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54117 - struct module *owner;
54118 + int (* const revalidate_disk) (struct gendisk *);
54119 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54120 + struct module * const owner;
54121 };
54122
54123 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54124 diff -urNp linux-2.6.32.41/include/linux/blktrace_api.h linux-2.6.32.41/include/linux/blktrace_api.h
54125 --- linux-2.6.32.41/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54126 +++ linux-2.6.32.41/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54127 @@ -160,7 +160,7 @@ struct blk_trace {
54128 struct dentry *dir;
54129 struct dentry *dropped_file;
54130 struct dentry *msg_file;
54131 - atomic_t dropped;
54132 + atomic_unchecked_t dropped;
54133 };
54134
54135 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54136 diff -urNp linux-2.6.32.41/include/linux/byteorder/little_endian.h linux-2.6.32.41/include/linux/byteorder/little_endian.h
54137 --- linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54138 +++ linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54139 @@ -42,51 +42,51 @@
54140
54141 static inline __le64 __cpu_to_le64p(const __u64 *p)
54142 {
54143 - return (__force __le64)*p;
54144 + return (__force const __le64)*p;
54145 }
54146 static inline __u64 __le64_to_cpup(const __le64 *p)
54147 {
54148 - return (__force __u64)*p;
54149 + return (__force const __u64)*p;
54150 }
54151 static inline __le32 __cpu_to_le32p(const __u32 *p)
54152 {
54153 - return (__force __le32)*p;
54154 + return (__force const __le32)*p;
54155 }
54156 static inline __u32 __le32_to_cpup(const __le32 *p)
54157 {
54158 - return (__force __u32)*p;
54159 + return (__force const __u32)*p;
54160 }
54161 static inline __le16 __cpu_to_le16p(const __u16 *p)
54162 {
54163 - return (__force __le16)*p;
54164 + return (__force const __le16)*p;
54165 }
54166 static inline __u16 __le16_to_cpup(const __le16 *p)
54167 {
54168 - return (__force __u16)*p;
54169 + return (__force const __u16)*p;
54170 }
54171 static inline __be64 __cpu_to_be64p(const __u64 *p)
54172 {
54173 - return (__force __be64)__swab64p(p);
54174 + return (__force const __be64)__swab64p(p);
54175 }
54176 static inline __u64 __be64_to_cpup(const __be64 *p)
54177 {
54178 - return __swab64p((__u64 *)p);
54179 + return __swab64p((const __u64 *)p);
54180 }
54181 static inline __be32 __cpu_to_be32p(const __u32 *p)
54182 {
54183 - return (__force __be32)__swab32p(p);
54184 + return (__force const __be32)__swab32p(p);
54185 }
54186 static inline __u32 __be32_to_cpup(const __be32 *p)
54187 {
54188 - return __swab32p((__u32 *)p);
54189 + return __swab32p((const __u32 *)p);
54190 }
54191 static inline __be16 __cpu_to_be16p(const __u16 *p)
54192 {
54193 - return (__force __be16)__swab16p(p);
54194 + return (__force const __be16)__swab16p(p);
54195 }
54196 static inline __u16 __be16_to_cpup(const __be16 *p)
54197 {
54198 - return __swab16p((__u16 *)p);
54199 + return __swab16p((const __u16 *)p);
54200 }
54201 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54202 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54203 diff -urNp linux-2.6.32.41/include/linux/cache.h linux-2.6.32.41/include/linux/cache.h
54204 --- linux-2.6.32.41/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54205 +++ linux-2.6.32.41/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54206 @@ -16,6 +16,10 @@
54207 #define __read_mostly
54208 #endif
54209
54210 +#ifndef __read_only
54211 +#define __read_only __read_mostly
54212 +#endif
54213 +
54214 #ifndef ____cacheline_aligned
54215 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54216 #endif
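
__read_only defaults to __read_mostly here, but together with the .data.read_only output section added to the generic linker script earlier in this patch it is meant to collect write-once variables into memory that can be kept genuinely read-only. A hypothetical use, with pax_open_kernel()/pax_close_kernel() bracketing the single legitimate write:

/* Sketch: a set-once flag annotated with the new __read_only marker.
 * my_hardening_enabled and the boot parameter are invented. */
#include <linux/cache.h>
#include <linux/init.h>
#include <asm/pgtable.h>

static int my_hardening_enabled __read_only = 1;

static int __init my_hardening_off(char *str)
{
	/* Where .data.read_only is actually enforced the write must be
	 * bracketed; on other configurations both calls compile away. */
	pax_open_kernel();
	my_hardening_enabled = 0;
	pax_close_kernel();
	return 1;
}
__setup("nomyhardening", my_hardening_off);
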
54217 diff -urNp linux-2.6.32.41/include/linux/capability.h linux-2.6.32.41/include/linux/capability.h
54218 --- linux-2.6.32.41/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54219 +++ linux-2.6.32.41/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54220 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54221 (security_real_capable_noaudit((t), (cap)) == 0)
54222
54223 extern int capable(int cap);
54224 +int capable_nolog(int cap);
54225
54226 /* audit system wants to get cap info from files as well */
54227 struct dentry;
54228 diff -urNp linux-2.6.32.41/include/linux/compiler-gcc4.h linux-2.6.32.41/include/linux/compiler-gcc4.h
54229 --- linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54230 +++ linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54231 @@ -36,4 +36,8 @@
54232 the kernel context */
54233 #define __cold __attribute__((__cold__))
54234
54235 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54236 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54237 +#define __bos0(ptr) __bos((ptr), 0)
54238 +#define __bos1(ptr) __bos((ptr), 1)
54239 #endif
54240 diff -urNp linux-2.6.32.41/include/linux/compiler.h linux-2.6.32.41/include/linux/compiler.h
54241 --- linux-2.6.32.41/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54242 +++ linux-2.6.32.41/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54243 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54244 #define __cold
54245 #endif
54246
54247 +#ifndef __alloc_size
54248 +#define __alloc_size
54249 +#endif
54250 +
54251 +#ifndef __bos
54252 +#define __bos
54253 +#endif
54254 +
54255 +#ifndef __bos0
54256 +#define __bos0
54257 +#endif
54258 +
54259 +#ifndef __bos1
54260 +#define __bos1
54261 +#endif
54262 +
54263 /* Simple shorthand for a section definition */
54264 #ifndef __section
54265 # define __section(S) __attribute__ ((__section__(#S)))
54266 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54267 * use is to mediate communication between process-level code and irq/NMI
54268 * handlers, all running on the same CPU.
54269 */
54270 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54271 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54272 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54273
54274 #endif /* __LINUX_COMPILER_H */
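
__alloc_size tells gcc (4.3 and later) how large the allocation returned by a function is, and __bos/__bos0/__bos1 are shorthands for __builtin_object_size(), so the bound becomes visible to checking code; the empty fallbacks added to compiler.h just keep older compilers building. The companion ACCESS_ONCE change is the same const-by-default idea applied to accessors, with ACCESS_ONCE_RW as the explicit writable form. A sketch of what the allocation annotation buys; my_zalloc and my_probe_bound are invented, and the bound only resolves when gcc can see the annotation.

/* Sketch: propagating an allocation size into __builtin_object_size(). */
#include <linux/compiler.h>
#include <linux/slab.h>

/* Tell gcc that the returned pointer refers to n bytes. */
static void *my_zalloc(size_t n) __alloc_size(1);

static void *my_zalloc(size_t n)
{
	return kzalloc(n, GFP_KERNEL);
}

static size_t my_probe_bound(void)
{
	size_t bound;
	char *p = my_zalloc(64);

	if (!p)
		return 0;
	/* Evaluates to 64 when the annotation is visible to gcc >= 4.3,
	 * and to (size_t)-1 when no bound is known. */
	bound = __bos0(p);
	kfree(p);
	return bound;
}
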
54275 diff -urNp linux-2.6.32.41/include/linux/dcache.h linux-2.6.32.41/include/linux/dcache.h
54276 --- linux-2.6.32.41/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54277 +++ linux-2.6.32.41/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54278 @@ -119,6 +119,8 @@ struct dentry {
54279 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54280 };
54281
54282 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54283 +
54284 /*
54285 * dentry->d_lock spinlock nesting subclasses:
54286 *
54287 diff -urNp linux-2.6.32.41/include/linux/decompress/mm.h linux-2.6.32.41/include/linux/decompress/mm.h
54288 --- linux-2.6.32.41/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54289 +++ linux-2.6.32.41/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54290 @@ -78,7 +78,7 @@ static void free(void *where)
54291 * warnings when not needed (indeed large_malloc / large_free are not
54292 * needed by inflate */
54293
54294 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54295 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54296 #define free(a) kfree(a)
54297
54298 #define large_malloc(a) vmalloc(a)
54299 diff -urNp linux-2.6.32.41/include/linux/dma-mapping.h linux-2.6.32.41/include/linux/dma-mapping.h
54300 --- linux-2.6.32.41/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54301 +++ linux-2.6.32.41/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54302 @@ -16,50 +16,50 @@ enum dma_data_direction {
54303 };
54304
54305 struct dma_map_ops {
54306 - void* (*alloc_coherent)(struct device *dev, size_t size,
54307 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54308 dma_addr_t *dma_handle, gfp_t gfp);
54309 - void (*free_coherent)(struct device *dev, size_t size,
54310 + void (* const free_coherent)(struct device *dev, size_t size,
54311 void *vaddr, dma_addr_t dma_handle);
54312 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54313 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54314 unsigned long offset, size_t size,
54315 enum dma_data_direction dir,
54316 struct dma_attrs *attrs);
54317 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54318 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54319 size_t size, enum dma_data_direction dir,
54320 struct dma_attrs *attrs);
54321 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54322 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54323 int nents, enum dma_data_direction dir,
54324 struct dma_attrs *attrs);
54325 - void (*unmap_sg)(struct device *dev,
54326 + void (* const unmap_sg)(struct device *dev,
54327 struct scatterlist *sg, int nents,
54328 enum dma_data_direction dir,
54329 struct dma_attrs *attrs);
54330 - void (*sync_single_for_cpu)(struct device *dev,
54331 + void (* const sync_single_for_cpu)(struct device *dev,
54332 dma_addr_t dma_handle, size_t size,
54333 enum dma_data_direction dir);
54334 - void (*sync_single_for_device)(struct device *dev,
54335 + void (* const sync_single_for_device)(struct device *dev,
54336 dma_addr_t dma_handle, size_t size,
54337 enum dma_data_direction dir);
54338 - void (*sync_single_range_for_cpu)(struct device *dev,
54339 + void (* const sync_single_range_for_cpu)(struct device *dev,
54340 dma_addr_t dma_handle,
54341 unsigned long offset,
54342 size_t size,
54343 enum dma_data_direction dir);
54344 - void (*sync_single_range_for_device)(struct device *dev,
54345 + void (* const sync_single_range_for_device)(struct device *dev,
54346 dma_addr_t dma_handle,
54347 unsigned long offset,
54348 size_t size,
54349 enum dma_data_direction dir);
54350 - void (*sync_sg_for_cpu)(struct device *dev,
54351 + void (* const sync_sg_for_cpu)(struct device *dev,
54352 struct scatterlist *sg, int nents,
54353 enum dma_data_direction dir);
54354 - void (*sync_sg_for_device)(struct device *dev,
54355 + void (* const sync_sg_for_device)(struct device *dev,
54356 struct scatterlist *sg, int nents,
54357 enum dma_data_direction dir);
54358 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54359 - int (*dma_supported)(struct device *dev, u64 mask);
54360 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54361 + int (* const dma_supported)(struct device *dev, u64 mask);
54362 int (*set_dma_mask)(struct device *dev, u64 mask);
54363 - int is_phys;
54364 + const int is_phys;
54365 };
54366
54367 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54368 diff -urNp linux-2.6.32.41/include/linux/dst.h linux-2.6.32.41/include/linux/dst.h
54369 --- linux-2.6.32.41/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54370 +++ linux-2.6.32.41/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54371 @@ -380,7 +380,7 @@ struct dst_node
54372 struct thread_pool *pool;
54373
54374 /* Transaction IDs live here */
54375 - atomic_long_t gen;
54376 + atomic_long_unchecked_t gen;
54377
54378 /*
54379 * How frequently and how many times transaction
54380 diff -urNp linux-2.6.32.41/include/linux/elf.h linux-2.6.32.41/include/linux/elf.h
54381 --- linux-2.6.32.41/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54382 +++ linux-2.6.32.41/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54383 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54384 #define PT_GNU_EH_FRAME 0x6474e550
54385
54386 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54387 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54388 +
54389 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54390 +
54391 +/* Constants for the e_flags field */
54392 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54393 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54394 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54395 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54396 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54397 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54398
54399 /* These constants define the different elf file types */
54400 #define ET_NONE 0
54401 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54402 #define DT_DEBUG 21
54403 #define DT_TEXTREL 22
54404 #define DT_JMPREL 23
54405 +#define DT_FLAGS 30
54406 + #define DF_TEXTREL 0x00000004
54407 #define DT_ENCODING 32
54408 #define OLD_DT_LOOS 0x60000000
54409 #define DT_LOOS 0x6000000d
54410 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54411 #define PF_W 0x2
54412 #define PF_X 0x1
54413
54414 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54415 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54416 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54417 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54418 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54419 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54420 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54421 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54422 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54423 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54424 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54425 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54426 +
54427 typedef struct elf32_phdr{
54428 Elf32_Word p_type;
54429 Elf32_Off p_offset;
54430 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54431 #define EI_OSABI 7
54432 #define EI_PAD 8
54433
54434 +#define EI_PAX 14
54435 +
54436 #define ELFMAG0 0x7f /* EI_MAG */
54437 #define ELFMAG1 'E'
54438 #define ELFMAG2 'L'
54439 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54440 #define elf_phdr elf32_phdr
54441 #define elf_note elf32_note
54442 #define elf_addr_t Elf32_Off
54443 +#define elf_dyn Elf32_Dyn
54444
54445 #else
54446
54447 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54448 #define elf_phdr elf64_phdr
54449 #define elf_note elf64_note
54450 #define elf_addr_t Elf64_Off
54451 +#define elf_dyn Elf64_Dyn
54452
54453 #endif
54454
54455 diff -urNp linux-2.6.32.41/include/linux/fscache-cache.h linux-2.6.32.41/include/linux/fscache-cache.h
54456 --- linux-2.6.32.41/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54457 +++ linux-2.6.32.41/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54458 @@ -116,7 +116,7 @@ struct fscache_operation {
54459 #endif
54460 };
54461
54462 -extern atomic_t fscache_op_debug_id;
54463 +extern atomic_unchecked_t fscache_op_debug_id;
54464 extern const struct slow_work_ops fscache_op_slow_work_ops;
54465
54466 extern void fscache_enqueue_operation(struct fscache_operation *);
54467 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54468 fscache_operation_release_t release)
54469 {
54470 atomic_set(&op->usage, 1);
54471 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54472 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54473 op->release = release;
54474 INIT_LIST_HEAD(&op->pend_link);
54475 fscache_set_op_state(op, "Init");
54476 diff -urNp linux-2.6.32.41/include/linux/fs.h linux-2.6.32.41/include/linux/fs.h
54477 --- linux-2.6.32.41/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54478 +++ linux-2.6.32.41/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54479 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54480 /* Expect random access pattern */
54481 #define FMODE_RANDOM ((__force fmode_t)4096)
54482
54483 +/* Hack for grsec so as not to require read permission simply to execute
54484 + * a binary
54485 + */
54486 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54487 +
54488 /*
54489 * The below are the various read and write types that we support. Some of
54490 * them include behavioral modifiers that send information down to the
54491 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54492 unsigned long, unsigned long);
54493
54494 struct address_space_operations {
54495 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54496 - int (*readpage)(struct file *, struct page *);
54497 - void (*sync_page)(struct page *);
54498 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54499 + int (* const readpage)(struct file *, struct page *);
54500 + void (* const sync_page)(struct page *);
54501
54502 /* Write back some dirty pages from this mapping. */
54503 - int (*writepages)(struct address_space *, struct writeback_control *);
54504 + int (* const writepages)(struct address_space *, struct writeback_control *);
54505
54506 /* Set a page dirty. Return true if this dirtied it */
54507 - int (*set_page_dirty)(struct page *page);
54508 + int (* const set_page_dirty)(struct page *page);
54509
54510 - int (*readpages)(struct file *filp, struct address_space *mapping,
54511 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54512 struct list_head *pages, unsigned nr_pages);
54513
54514 - int (*write_begin)(struct file *, struct address_space *mapping,
54515 + int (* const write_begin)(struct file *, struct address_space *mapping,
54516 loff_t pos, unsigned len, unsigned flags,
54517 struct page **pagep, void **fsdata);
54518 - int (*write_end)(struct file *, struct address_space *mapping,
54519 + int (* const write_end)(struct file *, struct address_space *mapping,
54520 loff_t pos, unsigned len, unsigned copied,
54521 struct page *page, void *fsdata);
54522
54523 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54524 - sector_t (*bmap)(struct address_space *, sector_t);
54525 - void (*invalidatepage) (struct page *, unsigned long);
54526 - int (*releasepage) (struct page *, gfp_t);
54527 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54528 + sector_t (* const bmap)(struct address_space *, sector_t);
54529 + void (* const invalidatepage) (struct page *, unsigned long);
54530 + int (* const releasepage) (struct page *, gfp_t);
54531 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54532 loff_t offset, unsigned long nr_segs);
54533 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54534 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54535 void **, unsigned long *);
54536 /* migrate the contents of a page to the specified target */
54537 - int (*migratepage) (struct address_space *,
54538 + int (* const migratepage) (struct address_space *,
54539 struct page *, struct page *);
54540 - int (*launder_page) (struct page *);
54541 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54542 + int (* const launder_page) (struct page *);
54543 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54544 unsigned long);
54545 - int (*error_remove_page)(struct address_space *, struct page *);
54546 + int (* const error_remove_page)(struct address_space *, struct page *);
54547 };
54548
54549 /*
54550 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54551 typedef struct files_struct *fl_owner_t;
54552
54553 struct file_lock_operations {
54554 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54555 - void (*fl_release_private)(struct file_lock *);
54556 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54557 + void (* const fl_release_private)(struct file_lock *);
54558 };
54559
54560 struct lock_manager_operations {
54561 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54562 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54563 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54564 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54565 - void (*fl_release_private)(struct file_lock *);
54566 - void (*fl_break)(struct file_lock *);
54567 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54568 - int (*fl_change)(struct file_lock **, int);
54569 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54570 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54571 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54572 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54573 + void (* const fl_release_private)(struct file_lock *);
54574 + void (* const fl_break)(struct file_lock *);
54575 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54576 + int (* const fl_change)(struct file_lock **, int);
54577 };
54578
54579 struct lock_manager {
54580 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54581 unsigned int fi_flags; /* Flags as passed from user */
54582 unsigned int fi_extents_mapped; /* Number of mapped extents */
54583 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54584 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54585 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54586 * array */
54587 };
54588 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54589 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54590 unsigned long, loff_t *);
54591
54592 struct super_operations {
54593 - struct inode *(*alloc_inode)(struct super_block *sb);
54594 - void (*destroy_inode)(struct inode *);
54595 + struct inode *(* const alloc_inode)(struct super_block *sb);
54596 + void (* const destroy_inode)(struct inode *);
54597
54598 - void (*dirty_inode) (struct inode *);
54599 - int (*write_inode) (struct inode *, int);
54600 - void (*drop_inode) (struct inode *);
54601 - void (*delete_inode) (struct inode *);
54602 - void (*put_super) (struct super_block *);
54603 - void (*write_super) (struct super_block *);
54604 - int (*sync_fs)(struct super_block *sb, int wait);
54605 - int (*freeze_fs) (struct super_block *);
54606 - int (*unfreeze_fs) (struct super_block *);
54607 - int (*statfs) (struct dentry *, struct kstatfs *);
54608 - int (*remount_fs) (struct super_block *, int *, char *);
54609 - void (*clear_inode) (struct inode *);
54610 - void (*umount_begin) (struct super_block *);
54611 + void (* const dirty_inode) (struct inode *);
54612 + int (* const write_inode) (struct inode *, int);
54613 + void (* const drop_inode) (struct inode *);
54614 + void (* const delete_inode) (struct inode *);
54615 + void (* const put_super) (struct super_block *);
54616 + void (* const write_super) (struct super_block *);
54617 + int (* const sync_fs)(struct super_block *sb, int wait);
54618 + int (* const freeze_fs) (struct super_block *);
54619 + int (* const unfreeze_fs) (struct super_block *);
54620 + int (* const statfs) (struct dentry *, struct kstatfs *);
54621 + int (* const remount_fs) (struct super_block *, int *, char *);
54622 + void (* const clear_inode) (struct inode *);
54623 + void (* const umount_begin) (struct super_block *);
54624
54625 - int (*show_options)(struct seq_file *, struct vfsmount *);
54626 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54627 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54628 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54629 #ifdef CONFIG_QUOTA
54630 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54631 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54632 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54633 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54634 #endif
54635 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54636 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54637 };
54638
54639 /*
54640 diff -urNp linux-2.6.32.41/include/linux/fs_struct.h linux-2.6.32.41/include/linux/fs_struct.h
54641 --- linux-2.6.32.41/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54642 +++ linux-2.6.32.41/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54643 @@ -4,7 +4,7 @@
54644 #include <linux/path.h>
54645
54646 struct fs_struct {
54647 - int users;
54648 + atomic_t users;
54649 rwlock_t lock;
54650 int umask;
54651 int in_exec;
54652 diff -urNp linux-2.6.32.41/include/linux/ftrace_event.h linux-2.6.32.41/include/linux/ftrace_event.h
54653 --- linux-2.6.32.41/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54654 +++ linux-2.6.32.41/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54655 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54656 int filter_type);
54657 extern int trace_define_common_fields(struct ftrace_event_call *call);
54658
54659 -#define is_signed_type(type) (((type)(-1)) < 0)
54660 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54661
54662 int trace_set_clr_event(const char *system, const char *event, int set);
54663
54664 diff -urNp linux-2.6.32.41/include/linux/genhd.h linux-2.6.32.41/include/linux/genhd.h
54665 --- linux-2.6.32.41/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54666 +++ linux-2.6.32.41/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54667 @@ -161,7 +161,7 @@ struct gendisk {
54668
54669 struct timer_rand_state *random;
54670
54671 - atomic_t sync_io; /* RAID */
54672 + atomic_unchecked_t sync_io; /* RAID */
54673 struct work_struct async_notify;
54674 #ifdef CONFIG_BLK_DEV_INTEGRITY
54675 struct blk_integrity *integrity;
54676 diff -urNp linux-2.6.32.41/include/linux/gracl.h linux-2.6.32.41/include/linux/gracl.h
54677 --- linux-2.6.32.41/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54678 +++ linux-2.6.32.41/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54679 @@ -0,0 +1,317 @@
54680 +#ifndef GR_ACL_H
54681 +#define GR_ACL_H
54682 +
54683 +#include <linux/grdefs.h>
54684 +#include <linux/resource.h>
54685 +#include <linux/capability.h>
54686 +#include <linux/dcache.h>
54687 +#include <asm/resource.h>
54688 +
54689 +/* Major status information */
54690 +
54691 +#define GR_VERSION "grsecurity 2.2.2"
54692 +#define GRSECURITY_VERSION 0x2202
54693 +
54694 +enum {
54695 + GR_SHUTDOWN = 0,
54696 + GR_ENABLE = 1,
54697 + GR_SPROLE = 2,
54698 + GR_RELOAD = 3,
54699 + GR_SEGVMOD = 4,
54700 + GR_STATUS = 5,
54701 + GR_UNSPROLE = 6,
54702 + GR_PASSSET = 7,
54703 + GR_SPROLEPAM = 8,
54704 +};
54705 +
54706 +/* Password setup definitions
54707 + * kernel/grhash.c */
54708 +enum {
54709 + GR_PW_LEN = 128,
54710 + GR_SALT_LEN = 16,
54711 + GR_SHA_LEN = 32,
54712 +};
54713 +
54714 +enum {
54715 + GR_SPROLE_LEN = 64,
54716 +};
54717 +
54718 +enum {
54719 + GR_NO_GLOB = 0,
54720 + GR_REG_GLOB,
54721 + GR_CREATE_GLOB
54722 +};
54723 +
54724 +#define GR_NLIMITS 32
54725 +
54726 +/* Begin Data Structures */
54727 +
54728 +struct sprole_pw {
54729 + unsigned char *rolename;
54730 + unsigned char salt[GR_SALT_LEN];
54731 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54732 +};
54733 +
54734 +struct name_entry {
54735 + __u32 key;
54736 + ino_t inode;
54737 + dev_t device;
54738 + char *name;
54739 + __u16 len;
54740 + __u8 deleted;
54741 + struct name_entry *prev;
54742 + struct name_entry *next;
54743 +};
54744 +
54745 +struct inodev_entry {
54746 + struct name_entry *nentry;
54747 + struct inodev_entry *prev;
54748 + struct inodev_entry *next;
54749 +};
54750 +
54751 +struct acl_role_db {
54752 + struct acl_role_label **r_hash;
54753 + __u32 r_size;
54754 +};
54755 +
54756 +struct inodev_db {
54757 + struct inodev_entry **i_hash;
54758 + __u32 i_size;
54759 +};
54760 +
54761 +struct name_db {
54762 + struct name_entry **n_hash;
54763 + __u32 n_size;
54764 +};
54765 +
54766 +struct crash_uid {
54767 + uid_t uid;
54768 + unsigned long expires;
54769 +};
54770 +
54771 +struct gr_hash_struct {
54772 + void **table;
54773 + void **nametable;
54774 + void *first;
54775 + __u32 table_size;
54776 + __u32 used_size;
54777 + int type;
54778 +};
54779 +
54780 +/* Userspace Grsecurity ACL data structures */
54781 +
54782 +struct acl_subject_label {
54783 + char *filename;
54784 + ino_t inode;
54785 + dev_t device;
54786 + __u32 mode;
54787 + kernel_cap_t cap_mask;
54788 + kernel_cap_t cap_lower;
54789 + kernel_cap_t cap_invert_audit;
54790 +
54791 + struct rlimit res[GR_NLIMITS];
54792 + __u32 resmask;
54793 +
54794 + __u8 user_trans_type;
54795 + __u8 group_trans_type;
54796 + uid_t *user_transitions;
54797 + gid_t *group_transitions;
54798 + __u16 user_trans_num;
54799 + __u16 group_trans_num;
54800 +
54801 + __u32 sock_families[2];
54802 + __u32 ip_proto[8];
54803 + __u32 ip_type;
54804 + struct acl_ip_label **ips;
54805 + __u32 ip_num;
54806 + __u32 inaddr_any_override;
54807 +
54808 + __u32 crashes;
54809 + unsigned long expires;
54810 +
54811 + struct acl_subject_label *parent_subject;
54812 + struct gr_hash_struct *hash;
54813 + struct acl_subject_label *prev;
54814 + struct acl_subject_label *next;
54815 +
54816 + struct acl_object_label **obj_hash;
54817 + __u32 obj_hash_size;
54818 + __u16 pax_flags;
54819 +};
54820 +
54821 +struct role_allowed_ip {
54822 + __u32 addr;
54823 + __u32 netmask;
54824 +
54825 + struct role_allowed_ip *prev;
54826 + struct role_allowed_ip *next;
54827 +};
54828 +
54829 +struct role_transition {
54830 + char *rolename;
54831 +
54832 + struct role_transition *prev;
54833 + struct role_transition *next;
54834 +};
54835 +
54836 +struct acl_role_label {
54837 + char *rolename;
54838 + uid_t uidgid;
54839 + __u16 roletype;
54840 +
54841 + __u16 auth_attempts;
54842 + unsigned long expires;
54843 +
54844 + struct acl_subject_label *root_label;
54845 + struct gr_hash_struct *hash;
54846 +
54847 + struct acl_role_label *prev;
54848 + struct acl_role_label *next;
54849 +
54850 + struct role_transition *transitions;
54851 + struct role_allowed_ip *allowed_ips;
54852 + uid_t *domain_children;
54853 + __u16 domain_child_num;
54854 +
54855 + struct acl_subject_label **subj_hash;
54856 + __u32 subj_hash_size;
54857 +};
54858 +
54859 +struct user_acl_role_db {
54860 + struct acl_role_label **r_table;
54861 + __u32 num_pointers; /* Number of allocations to track */
54862 + __u32 num_roles; /* Number of roles */
54863 + __u32 num_domain_children; /* Number of domain children */
54864 + __u32 num_subjects; /* Number of subjects */
54865 + __u32 num_objects; /* Number of objects */
54866 +};
54867 +
54868 +struct acl_object_label {
54869 + char *filename;
54870 + ino_t inode;
54871 + dev_t device;
54872 + __u32 mode;
54873 +
54874 + struct acl_subject_label *nested;
54875 + struct acl_object_label *globbed;
54876 +
54877 + /* next two structures not used */
54878 +
54879 + struct acl_object_label *prev;
54880 + struct acl_object_label *next;
54881 +};
54882 +
54883 +struct acl_ip_label {
54884 + char *iface;
54885 + __u32 addr;
54886 + __u32 netmask;
54887 + __u16 low, high;
54888 + __u8 mode;
54889 + __u32 type;
54890 + __u32 proto[8];
54891 +
54892 + /* next two structures not used */
54893 +
54894 + struct acl_ip_label *prev;
54895 + struct acl_ip_label *next;
54896 +};
54897 +
54898 +struct gr_arg {
54899 + struct user_acl_role_db role_db;
54900 + unsigned char pw[GR_PW_LEN];
54901 + unsigned char salt[GR_SALT_LEN];
54902 + unsigned char sum[GR_SHA_LEN];
54903 + unsigned char sp_role[GR_SPROLE_LEN];
54904 + struct sprole_pw *sprole_pws;
54905 + dev_t segv_device;
54906 + ino_t segv_inode;
54907 + uid_t segv_uid;
54908 + __u16 num_sprole_pws;
54909 + __u16 mode;
54910 +};
54911 +
54912 +struct gr_arg_wrapper {
54913 + struct gr_arg *arg;
54914 + __u32 version;
54915 + __u32 size;
54916 +};
54917 +
54918 +struct subject_map {
54919 + struct acl_subject_label *user;
54920 + struct acl_subject_label *kernel;
54921 + struct subject_map *prev;
54922 + struct subject_map *next;
54923 +};
54924 +
54925 +struct acl_subj_map_db {
54926 + struct subject_map **s_hash;
54927 + __u32 s_size;
54928 +};
54929 +
54930 +/* End Data Structures Section */
54931 +
54932 +/* Hash functions generated by empirical testing by Brad Spengler
54933 + Makes good use of the low bits of the inode. Generally 0-1 times
54934 + in loop for successful match. 0-3 for unsuccessful match.
54935 + Shift/add algorithm with modulus of table size and an XOR*/
54936 +
54937 +static __inline__ unsigned int
54938 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54939 +{
54940 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
54941 +}
54942 +
54943 + static __inline__ unsigned int
54944 +shash(const struct acl_subject_label *userp, const unsigned int sz)
54945 +{
54946 + return ((const unsigned long)userp % sz);
54947 +}
54948 +
54949 +static __inline__ unsigned int
54950 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
54951 +{
54952 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
54953 +}
54954 +
54955 +static __inline__ unsigned int
54956 +nhash(const char *name, const __u16 len, const unsigned int sz)
54957 +{
54958 + return full_name_hash((const unsigned char *)name, len) % sz;
54959 +}
54960 +
54961 +#define FOR_EACH_ROLE_START(role) \
54962 + role = role_list; \
54963 + while (role) {
54964 +
54965 +#define FOR_EACH_ROLE_END(role) \
54966 + role = role->prev; \
54967 + }
54968 +
54969 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
54970 + subj = NULL; \
54971 + iter = 0; \
54972 + while (iter < role->subj_hash_size) { \
54973 + if (subj == NULL) \
54974 + subj = role->subj_hash[iter]; \
54975 + if (subj == NULL) { \
54976 + iter++; \
54977 + continue; \
54978 + }
54979 +
54980 +#define FOR_EACH_SUBJECT_END(subj,iter) \
54981 + subj = subj->next; \
54982 + if (subj == NULL) \
54983 + iter++; \
54984 + }
54985 +
54986 +
54987 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
54988 + subj = role->hash->first; \
54989 + while (subj != NULL) {
54990 +
54991 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
54992 + subj = subj->next; \
54993 + }
54994 +
54995 +#endif
54996 +
54997 diff -urNp linux-2.6.32.41/include/linux/gralloc.h linux-2.6.32.41/include/linux/gralloc.h
54998 --- linux-2.6.32.41/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
54999 +++ linux-2.6.32.41/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55000 @@ -0,0 +1,9 @@
55001 +#ifndef __GRALLOC_H
55002 +#define __GRALLOC_H
55003 +
55004 +void acl_free_all(void);
55005 +int acl_alloc_stack_init(unsigned long size);
55006 +void *acl_alloc(unsigned long len);
55007 +void *acl_alloc_num(unsigned long num, unsigned long len);
55008 +
55009 +#endif
55010 diff -urNp linux-2.6.32.41/include/linux/grdefs.h linux-2.6.32.41/include/linux/grdefs.h
55011 --- linux-2.6.32.41/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55012 +++ linux-2.6.32.41/include/linux/grdefs.h 2011-04-17 15:56:46.000000000 -0400
55013 @@ -0,0 +1,139 @@
55014 +#ifndef GRDEFS_H
55015 +#define GRDEFS_H
55016 +
55017 +/* Begin grsecurity status declarations */
55018 +
55019 +enum {
55020 + GR_READY = 0x01,
55021 + GR_STATUS_INIT = 0x00 // disabled state
55022 +};
55023 +
55024 +/* Begin ACL declarations */
55025 +
55026 +/* Role flags */
55027 +
55028 +enum {
55029 + GR_ROLE_USER = 0x0001,
55030 + GR_ROLE_GROUP = 0x0002,
55031 + GR_ROLE_DEFAULT = 0x0004,
55032 + GR_ROLE_SPECIAL = 0x0008,
55033 + GR_ROLE_AUTH = 0x0010,
55034 + GR_ROLE_NOPW = 0x0020,
55035 + GR_ROLE_GOD = 0x0040,
55036 + GR_ROLE_LEARN = 0x0080,
55037 + GR_ROLE_TPE = 0x0100,
55038 + GR_ROLE_DOMAIN = 0x0200,
55039 + GR_ROLE_PAM = 0x0400,
55040 + GR_ROLE_PERSIST = 0x800
55041 +};
55042 +
55043 +/* ACL Subject and Object mode flags */
55044 +enum {
55045 + GR_DELETED = 0x80000000
55046 +};
55047 +
55048 +/* ACL Object-only mode flags */
55049 +enum {
55050 + GR_READ = 0x00000001,
55051 + GR_APPEND = 0x00000002,
55052 + GR_WRITE = 0x00000004,
55053 + GR_EXEC = 0x00000008,
55054 + GR_FIND = 0x00000010,
55055 + GR_INHERIT = 0x00000020,
55056 + GR_SETID = 0x00000040,
55057 + GR_CREATE = 0x00000080,
55058 + GR_DELETE = 0x00000100,
55059 + GR_LINK = 0x00000200,
55060 + GR_AUDIT_READ = 0x00000400,
55061 + GR_AUDIT_APPEND = 0x00000800,
55062 + GR_AUDIT_WRITE = 0x00001000,
55063 + GR_AUDIT_EXEC = 0x00002000,
55064 + GR_AUDIT_FIND = 0x00004000,
55065 + GR_AUDIT_INHERIT= 0x00008000,
55066 + GR_AUDIT_SETID = 0x00010000,
55067 + GR_AUDIT_CREATE = 0x00020000,
55068 + GR_AUDIT_DELETE = 0x00040000,
55069 + GR_AUDIT_LINK = 0x00080000,
55070 + GR_PTRACERD = 0x00100000,
55071 + GR_NOPTRACE = 0x00200000,
55072 + GR_SUPPRESS = 0x00400000,
55073 + GR_NOLEARN = 0x00800000,
55074 + GR_INIT_TRANSFER= 0x01000000
55075 +};
55076 +
55077 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55078 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55079 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55080 +
55081 +/* ACL subject-only mode flags */
55082 +enum {
55083 + GR_KILL = 0x00000001,
55084 + GR_VIEW = 0x00000002,
55085 + GR_PROTECTED = 0x00000004,
55086 + GR_LEARN = 0x00000008,
55087 + GR_OVERRIDE = 0x00000010,
55088 + /* just a placeholder, this mode is only used in userspace */
55089 + GR_DUMMY = 0x00000020,
55090 + GR_PROTSHM = 0x00000040,
55091 + GR_KILLPROC = 0x00000080,
55092 + GR_KILLIPPROC = 0x00000100,
55093 + /* just a placeholder, this mode is only used in userspace */
55094 + GR_NOTROJAN = 0x00000200,
55095 + GR_PROTPROCFD = 0x00000400,
55096 + GR_PROCACCT = 0x00000800,
55097 + GR_RELAXPTRACE = 0x00001000,
55098 + GR_NESTED = 0x00002000,
55099 + GR_INHERITLEARN = 0x00004000,
55100 + GR_PROCFIND = 0x00008000,
55101 + GR_POVERRIDE = 0x00010000,
55102 + GR_KERNELAUTH = 0x00020000,
55103 + GR_ATSECURE = 0x00040000
55104 +};
55105 +
55106 +enum {
55107 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55108 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55109 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55110 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55111 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55112 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55113 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55114 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55115 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55116 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55117 +};
55118 +
55119 +enum {
55120 + GR_ID_USER = 0x01,
55121 + GR_ID_GROUP = 0x02,
55122 +};
55123 +
55124 +enum {
55125 + GR_ID_ALLOW = 0x01,
55126 + GR_ID_DENY = 0x02,
55127 +};
55128 +
55129 +#define GR_CRASH_RES 31
55130 +#define GR_UIDTABLE_MAX 500
55131 +
55132 +/* begin resource learning section */
55133 +enum {
55134 + GR_RLIM_CPU_BUMP = 60,
55135 + GR_RLIM_FSIZE_BUMP = 50000,
55136 + GR_RLIM_DATA_BUMP = 10000,
55137 + GR_RLIM_STACK_BUMP = 1000,
55138 + GR_RLIM_CORE_BUMP = 10000,
55139 + GR_RLIM_RSS_BUMP = 500000,
55140 + GR_RLIM_NPROC_BUMP = 1,
55141 + GR_RLIM_NOFILE_BUMP = 5,
55142 + GR_RLIM_MEMLOCK_BUMP = 50000,
55143 + GR_RLIM_AS_BUMP = 500000,
55144 + GR_RLIM_LOCKS_BUMP = 2,
55145 + GR_RLIM_SIGPENDING_BUMP = 5,
55146 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55147 + GR_RLIM_NICE_BUMP = 1,
55148 + GR_RLIM_RTPRIO_BUMP = 1,
55149 + GR_RLIM_RTTIME_BUMP = 1000000
55150 +};
55151 +
55152 +#endif
55153 diff -urNp linux-2.6.32.41/include/linux/grinternal.h linux-2.6.32.41/include/linux/grinternal.h
55154 --- linux-2.6.32.41/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55155 +++ linux-2.6.32.41/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55156 @@ -0,0 +1,218 @@
55157 +#ifndef __GRINTERNAL_H
55158 +#define __GRINTERNAL_H
55159 +
55160 +#ifdef CONFIG_GRKERNSEC
55161 +
55162 +#include <linux/fs.h>
55163 +#include <linux/mnt_namespace.h>
55164 +#include <linux/nsproxy.h>
55165 +#include <linux/gracl.h>
55166 +#include <linux/grdefs.h>
55167 +#include <linux/grmsg.h>
55168 +
55169 +void gr_add_learn_entry(const char *fmt, ...)
55170 + __attribute__ ((format (printf, 1, 2)));
55171 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55172 + const struct vfsmount *mnt);
55173 +__u32 gr_check_create(const struct dentry *new_dentry,
55174 + const struct dentry *parent,
55175 + const struct vfsmount *mnt, const __u32 mode);
55176 +int gr_check_protected_task(const struct task_struct *task);
55177 +__u32 to_gr_audit(const __u32 reqmode);
55178 +int gr_set_acls(const int type);
55179 +int gr_apply_subject_to_task(struct task_struct *task);
55180 +int gr_acl_is_enabled(void);
55181 +char gr_roletype_to_char(void);
55182 +
55183 +void gr_handle_alertkill(struct task_struct *task);
55184 +char *gr_to_filename(const struct dentry *dentry,
55185 + const struct vfsmount *mnt);
55186 +char *gr_to_filename1(const struct dentry *dentry,
55187 + const struct vfsmount *mnt);
55188 +char *gr_to_filename2(const struct dentry *dentry,
55189 + const struct vfsmount *mnt);
55190 +char *gr_to_filename3(const struct dentry *dentry,
55191 + const struct vfsmount *mnt);
55192 +
55193 +extern int grsec_enable_harden_ptrace;
55194 +extern int grsec_enable_link;
55195 +extern int grsec_enable_fifo;
55196 +extern int grsec_enable_execve;
55197 +extern int grsec_enable_shm;
55198 +extern int grsec_enable_execlog;
55199 +extern int grsec_enable_signal;
55200 +extern int grsec_enable_audit_ptrace;
55201 +extern int grsec_enable_forkfail;
55202 +extern int grsec_enable_time;
55203 +extern int grsec_enable_rofs;
55204 +extern int grsec_enable_chroot_shmat;
55205 +extern int grsec_enable_chroot_findtask;
55206 +extern int grsec_enable_chroot_mount;
55207 +extern int grsec_enable_chroot_double;
55208 +extern int grsec_enable_chroot_pivot;
55209 +extern int grsec_enable_chroot_chdir;
55210 +extern int grsec_enable_chroot_chmod;
55211 +extern int grsec_enable_chroot_mknod;
55212 +extern int grsec_enable_chroot_fchdir;
55213 +extern int grsec_enable_chroot_nice;
55214 +extern int grsec_enable_chroot_execlog;
55215 +extern int grsec_enable_chroot_caps;
55216 +extern int grsec_enable_chroot_sysctl;
55217 +extern int grsec_enable_chroot_unix;
55218 +extern int grsec_enable_tpe;
55219 +extern int grsec_tpe_gid;
55220 +extern int grsec_enable_tpe_all;
55221 +extern int grsec_enable_tpe_invert;
55222 +extern int grsec_enable_socket_all;
55223 +extern int grsec_socket_all_gid;
55224 +extern int grsec_enable_socket_client;
55225 +extern int grsec_socket_client_gid;
55226 +extern int grsec_enable_socket_server;
55227 +extern int grsec_socket_server_gid;
55228 +extern int grsec_audit_gid;
55229 +extern int grsec_enable_group;
55230 +extern int grsec_enable_audit_textrel;
55231 +extern int grsec_enable_log_rwxmaps;
55232 +extern int grsec_enable_mount;
55233 +extern int grsec_enable_chdir;
55234 +extern int grsec_resource_logging;
55235 +extern int grsec_enable_blackhole;
55236 +extern int grsec_lastack_retries;
55237 +extern int grsec_lock;
55238 +
55239 +extern spinlock_t grsec_alert_lock;
55240 +extern unsigned long grsec_alert_wtime;
55241 +extern unsigned long grsec_alert_fyet;
55242 +
55243 +extern spinlock_t grsec_audit_lock;
55244 +
55245 +extern rwlock_t grsec_exec_file_lock;
55246 +
55247 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55248 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55249 + (tsk)->exec_file->f_vfsmnt) : "/")
55250 +
55251 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55252 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55253 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55254 +
55255 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55256 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55257 + (tsk)->exec_file->f_vfsmnt) : "/")
55258 +
55259 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55260 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55261 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55262 +
55263 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55264 +
55265 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55266 +
55267 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55268 + (task)->pid, (cred)->uid, \
55269 + (cred)->euid, (cred)->gid, (cred)->egid, \
55270 + gr_parent_task_fullpath(task), \
55271 + (task)->real_parent->comm, (task)->real_parent->pid, \
55272 + (pcred)->uid, (pcred)->euid, \
55273 + (pcred)->gid, (pcred)->egid
55274 +
55275 +#define GR_CHROOT_CAPS {{ \
55276 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55277 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55278 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55279 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55280 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55281 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55282 +
55283 +#define security_learn(normal_msg,args...) \
55284 +({ \
55285 + read_lock(&grsec_exec_file_lock); \
55286 + gr_add_learn_entry(normal_msg "\n", ## args); \
55287 + read_unlock(&grsec_exec_file_lock); \
55288 +})
55289 +
55290 +enum {
55291 + GR_DO_AUDIT,
55292 + GR_DONT_AUDIT,
55293 + GR_DONT_AUDIT_GOOD
55294 +};
55295 +
55296 +enum {
55297 + GR_TTYSNIFF,
55298 + GR_RBAC,
55299 + GR_RBAC_STR,
55300 + GR_STR_RBAC,
55301 + GR_RBAC_MODE2,
55302 + GR_RBAC_MODE3,
55303 + GR_FILENAME,
55304 + GR_SYSCTL_HIDDEN,
55305 + GR_NOARGS,
55306 + GR_ONE_INT,
55307 + GR_ONE_INT_TWO_STR,
55308 + GR_ONE_STR,
55309 + GR_STR_INT,
55310 + GR_TWO_STR_INT,
55311 + GR_TWO_INT,
55312 + GR_TWO_U64,
55313 + GR_THREE_INT,
55314 + GR_FIVE_INT_TWO_STR,
55315 + GR_TWO_STR,
55316 + GR_THREE_STR,
55317 + GR_FOUR_STR,
55318 + GR_STR_FILENAME,
55319 + GR_FILENAME_STR,
55320 + GR_FILENAME_TWO_INT,
55321 + GR_FILENAME_TWO_INT_STR,
55322 + GR_TEXTREL,
55323 + GR_PTRACE,
55324 + GR_RESOURCE,
55325 + GR_CAP,
55326 + GR_SIG,
55327 + GR_SIG2,
55328 + GR_CRASH1,
55329 + GR_CRASH2,
55330 + GR_PSACCT,
55331 + GR_RWXMAP
55332 +};
55333 +
55334 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55335 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55336 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55337 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55338 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55339 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55340 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55341 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55342 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55343 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55344 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55345 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55346 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55347 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55348 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55349 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55350 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55351 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55352 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55353 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55354 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55355 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55356 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55357 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55358 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55359 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55360 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55361 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55362 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55363 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55364 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55365 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55366 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55367 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55368 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55369 +
55370 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55371 +
55372 +#endif
55373 +
55374 +#endif
55375 diff -urNp linux-2.6.32.41/include/linux/grmsg.h linux-2.6.32.41/include/linux/grmsg.h
55376 --- linux-2.6.32.41/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55377 +++ linux-2.6.32.41/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55378 @@ -0,0 +1,108 @@
55379 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55380 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55381 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55382 +#define GR_STOPMOD_MSG "denied modification of module state by "
55383 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55384 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55385 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55386 +#define GR_IOPL_MSG "denied use of iopl() by "
55387 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55388 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55389 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55390 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55391 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55392 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55393 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55394 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55395 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55396 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55397 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55398 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55399 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55400 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55401 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55402 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55403 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55404 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55405 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55406 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55407 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55408 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55409 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55410 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55411 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55412 +#define GR_NPROC_MSG "denied overstep of process limit by "
55413 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55414 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55415 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55416 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55417 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55418 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55419 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55420 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55421 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55422 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55423 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55424 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55425 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55426 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55427 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55428 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55429 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55430 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55431 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55432 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55433 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55434 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55435 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55436 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55437 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55438 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55439 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55440 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55441 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55442 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55443 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55444 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55445 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55446 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55447 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55448 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55449 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55450 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55451 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55452 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55453 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55454 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55455 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55456 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55457 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55458 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55459 +#define GR_TIME_MSG "time set by "
55460 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55461 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55462 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55463 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55464 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55465 +#define GR_BIND_MSG "denied bind() by "
55466 +#define GR_CONNECT_MSG "denied connect() by "
55467 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55468 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55469 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55470 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55471 +#define GR_CAP_ACL_MSG "use of %s denied for "
55472 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55473 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55474 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55475 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55476 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55477 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55478 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55479 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55480 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55481 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55482 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55483 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55484 +#define GR_VM86_MSG "denied use of vm86 by "
55485 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55486 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55487 diff -urNp linux-2.6.32.41/include/linux/grsecurity.h linux-2.6.32.41/include/linux/grsecurity.h
55488 --- linux-2.6.32.41/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55489 +++ linux-2.6.32.41/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55490 @@ -0,0 +1,212 @@
55491 +#ifndef GR_SECURITY_H
55492 +#define GR_SECURITY_H
55493 +#include <linux/fs.h>
55494 +#include <linux/fs_struct.h>
55495 +#include <linux/binfmts.h>
55496 +#include <linux/gracl.h>
55497 +#include <linux/compat.h>
55498 +
55499 +/* notify of brain-dead configs */
55500 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55501 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55502 +#endif
55503 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55504 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55505 +#endif
55506 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55507 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55508 +#endif
55509 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55510 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55511 +#endif
55512 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55513 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55514 +#endif
55515 +
55516 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55517 +void gr_handle_brute_check(void);
55518 +void gr_handle_kernel_exploit(void);
55519 +int gr_process_user_ban(void);
55520 +
55521 +char gr_roletype_to_char(void);
55522 +
55523 +int gr_acl_enable_at_secure(void);
55524 +
55525 +int gr_check_user_change(int real, int effective, int fs);
55526 +int gr_check_group_change(int real, int effective, int fs);
55527 +
55528 +void gr_del_task_from_ip_table(struct task_struct *p);
55529 +
55530 +int gr_pid_is_chrooted(struct task_struct *p);
55531 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55532 +int gr_handle_chroot_nice(void);
55533 +int gr_handle_chroot_sysctl(const int op);
55534 +int gr_handle_chroot_setpriority(struct task_struct *p,
55535 + const int niceval);
55536 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55537 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55538 + const struct vfsmount *mnt);
55539 +int gr_handle_chroot_caps(struct path *path);
55540 +void gr_handle_chroot_chdir(struct path *path);
55541 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55542 + const struct vfsmount *mnt, const int mode);
55543 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55544 + const struct vfsmount *mnt, const int mode);
55545 +int gr_handle_chroot_mount(const struct dentry *dentry,
55546 + const struct vfsmount *mnt,
55547 + const char *dev_name);
55548 +int gr_handle_chroot_pivot(void);
55549 +int gr_handle_chroot_unix(const pid_t pid);
55550 +
55551 +int gr_handle_rawio(const struct inode *inode);
55552 +int gr_handle_nproc(void);
55553 +
55554 +void gr_handle_ioperm(void);
55555 +void gr_handle_iopl(void);
55556 +
55557 +int gr_tpe_allow(const struct file *file);
55558 +
55559 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55560 +void gr_clear_chroot_entries(struct task_struct *task);
55561 +
55562 +void gr_log_forkfail(const int retval);
55563 +void gr_log_timechange(void);
55564 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55565 +void gr_log_chdir(const struct dentry *dentry,
55566 + const struct vfsmount *mnt);
55567 +void gr_log_chroot_exec(const struct dentry *dentry,
55568 + const struct vfsmount *mnt);
55569 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55570 +#ifdef CONFIG_COMPAT
55571 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55572 +#endif
55573 +void gr_log_remount(const char *devname, const int retval);
55574 +void gr_log_unmount(const char *devname, const int retval);
55575 +void gr_log_mount(const char *from, const char *to, const int retval);
55576 +void gr_log_textrel(struct vm_area_struct *vma);
55577 +void gr_log_rwxmmap(struct file *file);
55578 +void gr_log_rwxmprotect(struct file *file);
55579 +
55580 +int gr_handle_follow_link(const struct inode *parent,
55581 + const struct inode *inode,
55582 + const struct dentry *dentry,
55583 + const struct vfsmount *mnt);
55584 +int gr_handle_fifo(const struct dentry *dentry,
55585 + const struct vfsmount *mnt,
55586 + const struct dentry *dir, const int flag,
55587 + const int acc_mode);
55588 +int gr_handle_hardlink(const struct dentry *dentry,
55589 + const struct vfsmount *mnt,
55590 + struct inode *inode,
55591 + const int mode, const char *to);
55592 +
55593 +int gr_is_capable(const int cap);
55594 +int gr_is_capable_nolog(const int cap);
55595 +void gr_learn_resource(const struct task_struct *task, const int limit,
55596 + const unsigned long wanted, const int gt);
55597 +void gr_copy_label(struct task_struct *tsk);
55598 +void gr_handle_crash(struct task_struct *task, const int sig);
55599 +int gr_handle_signal(const struct task_struct *p, const int sig);
55600 +int gr_check_crash_uid(const uid_t uid);
55601 +int gr_check_protected_task(const struct task_struct *task);
55602 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55603 +int gr_acl_handle_mmap(const struct file *file,
55604 + const unsigned long prot);
55605 +int gr_acl_handle_mprotect(const struct file *file,
55606 + const unsigned long prot);
55607 +int gr_check_hidden_task(const struct task_struct *tsk);
55608 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55609 + const struct vfsmount *mnt);
55610 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55611 + const struct vfsmount *mnt);
55612 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55613 + const struct vfsmount *mnt, const int fmode);
55614 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55615 + const struct vfsmount *mnt, mode_t mode);
55616 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55617 + const struct vfsmount *mnt, mode_t mode);
55618 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55619 + const struct vfsmount *mnt);
55620 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55621 + const struct vfsmount *mnt);
55622 +int gr_handle_ptrace(struct task_struct *task, const long request);
55623 +int gr_handle_proc_ptrace(struct task_struct *task);
55624 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55625 + const struct vfsmount *mnt);
55626 +int gr_check_crash_exec(const struct file *filp);
55627 +int gr_acl_is_enabled(void);
55628 +void gr_set_kernel_label(struct task_struct *task);
55629 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55630 + const gid_t gid);
55631 +int gr_set_proc_label(const struct dentry *dentry,
55632 + const struct vfsmount *mnt,
55633 + const int unsafe_share);
55634 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55635 + const struct vfsmount *mnt);
55636 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55637 + const struct vfsmount *mnt, const int fmode);
55638 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55639 + const struct dentry *p_dentry,
55640 + const struct vfsmount *p_mnt, const int fmode,
55641 + const int imode);
55642 +void gr_handle_create(const struct dentry *dentry,
55643 + const struct vfsmount *mnt);
55644 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55645 + const struct dentry *parent_dentry,
55646 + const struct vfsmount *parent_mnt,
55647 + const int mode);
55648 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55649 + const struct dentry *parent_dentry,
55650 + const struct vfsmount *parent_mnt);
55651 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55652 + const struct vfsmount *mnt);
55653 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55654 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55655 + const struct vfsmount *mnt);
55656 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55657 + const struct dentry *parent_dentry,
55658 + const struct vfsmount *parent_mnt,
55659 + const char *from);
55660 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55661 + const struct dentry *parent_dentry,
55662 + const struct vfsmount *parent_mnt,
55663 + const struct dentry *old_dentry,
55664 + const struct vfsmount *old_mnt, const char *to);
55665 +int gr_acl_handle_rename(struct dentry *new_dentry,
55666 + struct dentry *parent_dentry,
55667 + const struct vfsmount *parent_mnt,
55668 + struct dentry *old_dentry,
55669 + struct inode *old_parent_inode,
55670 + struct vfsmount *old_mnt, const char *newname);
55671 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55672 + struct dentry *old_dentry,
55673 + struct dentry *new_dentry,
55674 + struct vfsmount *mnt, const __u8 replace);
55675 +__u32 gr_check_link(const struct dentry *new_dentry,
55676 + const struct dentry *parent_dentry,
55677 + const struct vfsmount *parent_mnt,
55678 + const struct dentry *old_dentry,
55679 + const struct vfsmount *old_mnt);
55680 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55681 + const unsigned int namelen, const ino_t ino);
55682 +
55683 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55684 + const struct vfsmount *mnt);
55685 +void gr_acl_handle_exit(void);
55686 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55687 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55688 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55689 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55690 +void gr_audit_ptrace(struct task_struct *task);
55691 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55692 +
55693 +#ifdef CONFIG_GRKERNSEC
55694 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55695 +void gr_handle_vm86(void);
55696 +void gr_handle_mem_readwrite(u64 from, u64 to);
55697 +
55698 +extern int grsec_enable_dmesg;
55699 +extern int grsec_disable_privio;
55700 +#endif
55701 +
55702 +#endif
55703 diff -urNp linux-2.6.32.41/include/linux/hdpu_features.h linux-2.6.32.41/include/linux/hdpu_features.h
55704 --- linux-2.6.32.41/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55705 +++ linux-2.6.32.41/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55706 @@ -3,7 +3,7 @@
55707 struct cpustate_t {
55708 spinlock_t lock;
55709 int excl;
55710 - int open_count;
55711 + atomic_t open_count;
55712 unsigned char cached_val;
55713 int inited;
55714 unsigned long *set_addr;
55715 diff -urNp linux-2.6.32.41/include/linux/highmem.h linux-2.6.32.41/include/linux/highmem.h
55716 --- linux-2.6.32.41/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55717 +++ linux-2.6.32.41/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55718 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55719 kunmap_atomic(kaddr, KM_USER0);
55720 }
55721
55722 +static inline void sanitize_highpage(struct page *page)
55723 +{
55724 + void *kaddr;
55725 + unsigned long flags;
55726 +
55727 + local_irq_save(flags);
55728 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55729 + clear_page(kaddr);
55730 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55731 + local_irq_restore(flags);
55732 +}
55733 +
55734 static inline void zero_user_segments(struct page *page,
55735 unsigned start1, unsigned end1,
55736 unsigned start2, unsigned end2)
55737 diff -urNp linux-2.6.32.41/include/linux/i2o.h linux-2.6.32.41/include/linux/i2o.h
55738 --- linux-2.6.32.41/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55739 +++ linux-2.6.32.41/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55740 @@ -564,7 +564,7 @@ struct i2o_controller {
55741 struct i2o_device *exec; /* Executive */
55742 #if BITS_PER_LONG == 64
55743 spinlock_t context_list_lock; /* lock for context_list */
55744 - atomic_t context_list_counter; /* needed for unique contexts */
55745 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55746 struct list_head context_list; /* list of context id's
55747 and pointers */
55748 #endif
55749 diff -urNp linux-2.6.32.41/include/linux/init_task.h linux-2.6.32.41/include/linux/init_task.h
55750 --- linux-2.6.32.41/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55751 +++ linux-2.6.32.41/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55752 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
55753 #define INIT_IDS
55754 #endif
55755
55756 +#ifdef CONFIG_X86
55757 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55758 +#else
55759 +#define INIT_TASK_THREAD_INFO
55760 +#endif
55761 +
55762 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55763 /*
55764 * Because of the reduced scope of CAP_SETPCAP when filesystem
55765 @@ -156,6 +162,7 @@ extern struct cred init_cred;
55766 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55767 .comm = "swapper", \
55768 .thread = INIT_THREAD, \
55769 + INIT_TASK_THREAD_INFO \
55770 .fs = &init_fs, \
55771 .files = &init_files, \
55772 .signal = &init_signals, \
55773 diff -urNp linux-2.6.32.41/include/linux/interrupt.h linux-2.6.32.41/include/linux/interrupt.h
55774 --- linux-2.6.32.41/include/linux/interrupt.h 2011-03-27 14:31:47.000000000 -0400
55775 +++ linux-2.6.32.41/include/linux/interrupt.h 2011-04-17 15:56:46.000000000 -0400
55776 @@ -362,7 +362,7 @@ enum
55777 /* map softirq index to softirq name. update 'softirq_to_name' in
55778 * kernel/softirq.c when adding a new softirq.
55779 */
55780 -extern char *softirq_to_name[NR_SOFTIRQS];
55781 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55782
55783 /* softirq mask and active fields moved to irq_cpustat_t in
55784 * asm/hardirq.h to get better cache usage. KAO
55785 @@ -370,12 +370,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55786
55787 struct softirq_action
55788 {
55789 - void (*action)(struct softirq_action *);
55790 + void (*action)(void);
55791 };
55792
55793 asmlinkage void do_softirq(void);
55794 asmlinkage void __do_softirq(void);
55795 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55796 +extern void open_softirq(int nr, void (*action)(void));
55797 extern void softirq_init(void);
55798 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
55799 extern void raise_softirq_irqoff(unsigned int nr);
55800 diff -urNp linux-2.6.32.41/include/linux/irq.h linux-2.6.32.41/include/linux/irq.h
55801 --- linux-2.6.32.41/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
55802 +++ linux-2.6.32.41/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
55803 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
55804 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
55805 bool boot)
55806 {
55807 +#ifdef CONFIG_CPUMASK_OFFSTACK
55808 gfp_t gfp = GFP_ATOMIC;
55809
55810 if (boot)
55811 gfp = GFP_NOWAIT;
55812
55813 -#ifdef CONFIG_CPUMASK_OFFSTACK
55814 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
55815 return false;
55816
55817 diff -urNp linux-2.6.32.41/include/linux/kallsyms.h linux-2.6.32.41/include/linux/kallsyms.h
55818 --- linux-2.6.32.41/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
55819 +++ linux-2.6.32.41/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
55820 @@ -15,7 +15,8 @@
55821
55822 struct module;
55823
55824 -#ifdef CONFIG_KALLSYMS
55825 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55826 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55827 /* Lookup the address for a symbol. Returns 0 if not found. */
55828 unsigned long kallsyms_lookup_name(const char *name);
55829
55830 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
55831 /* Stupid that this does nothing, but I didn't create this mess. */
55832 #define __print_symbol(fmt, addr)
55833 #endif /*CONFIG_KALLSYMS*/
55834 +#else /* when included by kallsyms.c, vsnprintf.c, or
55835 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55836 +extern void __print_symbol(const char *fmt, unsigned long address);
55837 +extern int sprint_symbol(char *buffer, unsigned long address);
55838 +const char *kallsyms_lookup(unsigned long addr,
55839 + unsigned long *symbolsize,
55840 + unsigned long *offset,
55841 + char **modname, char *namebuf);
55842 +#endif
55843
55844 /* This macro allows us to keep printk typechecking */
55845 static void __check_printsym_format(const char *fmt, ...)
55846 diff -urNp linux-2.6.32.41/include/linux/kgdb.h linux-2.6.32.41/include/linux/kgdb.h
55847 --- linux-2.6.32.41/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
55848 +++ linux-2.6.32.41/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
55849 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
55850
55851 extern int kgdb_connected;
55852
55853 -extern atomic_t kgdb_setting_breakpoint;
55854 -extern atomic_t kgdb_cpu_doing_single_step;
55855 +extern atomic_unchecked_t kgdb_setting_breakpoint;
55856 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55857
55858 extern struct task_struct *kgdb_usethread;
55859 extern struct task_struct *kgdb_contthread;
55860 @@ -251,20 +251,20 @@ struct kgdb_arch {
55861 */
55862 struct kgdb_io {
55863 const char *name;
55864 - int (*read_char) (void);
55865 - void (*write_char) (u8);
55866 - void (*flush) (void);
55867 - int (*init) (void);
55868 - void (*pre_exception) (void);
55869 - void (*post_exception) (void);
55870 + int (* const read_char) (void);
55871 + void (* const write_char) (u8);
55872 + void (* const flush) (void);
55873 + int (* const init) (void);
55874 + void (* const pre_exception) (void);
55875 + void (* const post_exception) (void);
55876 };
55877
55878 -extern struct kgdb_arch arch_kgdb_ops;
55879 +extern const struct kgdb_arch arch_kgdb_ops;
55880
55881 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
55882
55883 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
55884 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
55885 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
55886 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
55887
55888 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
55889 extern int kgdb_mem2hex(char *mem, char *buf, int count);
55890 diff -urNp linux-2.6.32.41/include/linux/kmod.h linux-2.6.32.41/include/linux/kmod.h
55891 --- linux-2.6.32.41/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
55892 +++ linux-2.6.32.41/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
55893 @@ -31,6 +31,8 @@
55894 * usually useless though. */
55895 extern int __request_module(bool wait, const char *name, ...) \
55896 __attribute__((format(printf, 2, 3)));
55897 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
55898 + __attribute__((format(printf, 3, 4)));
55899 #define request_module(mod...) __request_module(true, mod)
55900 #define request_module_nowait(mod...) __request_module(false, mod)
55901 #define try_then_request_module(x, mod...) \
55902 diff -urNp linux-2.6.32.41/include/linux/kobject.h linux-2.6.32.41/include/linux/kobject.h
55903 --- linux-2.6.32.41/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
55904 +++ linux-2.6.32.41/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
55905 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
55906
55907 struct kobj_type {
55908 void (*release)(struct kobject *kobj);
55909 - struct sysfs_ops *sysfs_ops;
55910 + const struct sysfs_ops *sysfs_ops;
55911 struct attribute **default_attrs;
55912 };
55913
55914 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
55915 };
55916
55917 struct kset_uevent_ops {
55918 - int (*filter)(struct kset *kset, struct kobject *kobj);
55919 - const char *(*name)(struct kset *kset, struct kobject *kobj);
55920 - int (*uevent)(struct kset *kset, struct kobject *kobj,
55921 + int (* const filter)(struct kset *kset, struct kobject *kobj);
55922 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
55923 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
55924 struct kobj_uevent_env *env);
55925 };
55926
55927 @@ -132,7 +132,7 @@ struct kobj_attribute {
55928 const char *buf, size_t count);
55929 };
55930
55931 -extern struct sysfs_ops kobj_sysfs_ops;
55932 +extern const struct sysfs_ops kobj_sysfs_ops;
55933
55934 /**
55935 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
55936 @@ -155,14 +155,14 @@ struct kset {
55937 struct list_head list;
55938 spinlock_t list_lock;
55939 struct kobject kobj;
55940 - struct kset_uevent_ops *uevent_ops;
55941 + const struct kset_uevent_ops *uevent_ops;
55942 };
55943
55944 extern void kset_init(struct kset *kset);
55945 extern int __must_check kset_register(struct kset *kset);
55946 extern void kset_unregister(struct kset *kset);
55947 extern struct kset * __must_check kset_create_and_add(const char *name,
55948 - struct kset_uevent_ops *u,
55949 + const struct kset_uevent_ops *u,
55950 struct kobject *parent_kobj);
55951
55952 static inline struct kset *to_kset(struct kobject *kobj)
55953 diff -urNp linux-2.6.32.41/include/linux/kvm_host.h linux-2.6.32.41/include/linux/kvm_host.h
55954 --- linux-2.6.32.41/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
55955 +++ linux-2.6.32.41/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
55956 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
55957 void vcpu_load(struct kvm_vcpu *vcpu);
55958 void vcpu_put(struct kvm_vcpu *vcpu);
55959
55960 -int kvm_init(void *opaque, unsigned int vcpu_size,
55961 +int kvm_init(const void *opaque, unsigned int vcpu_size,
55962 struct module *module);
55963 void kvm_exit(void);
55964
55965 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
55966 struct kvm_guest_debug *dbg);
55967 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
55968
55969 -int kvm_arch_init(void *opaque);
55970 +int kvm_arch_init(const void *opaque);
55971 void kvm_arch_exit(void);
55972
55973 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
55974 diff -urNp linux-2.6.32.41/include/linux/libata.h linux-2.6.32.41/include/linux/libata.h
55975 --- linux-2.6.32.41/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
55976 +++ linux-2.6.32.41/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
55977 @@ -525,11 +525,11 @@ struct ata_ioports {
55978
55979 struct ata_host {
55980 spinlock_t lock;
55981 - struct device *dev;
55982 + struct device *dev;
55983 void __iomem * const *iomap;
55984 unsigned int n_ports;
55985 void *private_data;
55986 - struct ata_port_operations *ops;
55987 + const struct ata_port_operations *ops;
55988 unsigned long flags;
55989 #ifdef CONFIG_ATA_ACPI
55990 acpi_handle acpi_handle;
55991 @@ -710,7 +710,7 @@ struct ata_link {
55992
55993 struct ata_port {
55994 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
55995 - struct ata_port_operations *ops;
55996 + const struct ata_port_operations *ops;
55997 spinlock_t *lock;
55998 /* Flags owned by the EH context. Only EH should touch these once the
55999 port is active */
56000 @@ -892,7 +892,7 @@ struct ata_port_info {
56001 unsigned long pio_mask;
56002 unsigned long mwdma_mask;
56003 unsigned long udma_mask;
56004 - struct ata_port_operations *port_ops;
56005 + const struct ata_port_operations *port_ops;
56006 void *private_data;
56007 };
56008
56009 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56010 extern const unsigned long sata_deb_timing_hotplug[];
56011 extern const unsigned long sata_deb_timing_long[];
56012
56013 -extern struct ata_port_operations ata_dummy_port_ops;
56014 +extern const struct ata_port_operations ata_dummy_port_ops;
56015 extern const struct ata_port_info ata_dummy_port_info;
56016
56017 static inline const unsigned long *
56018 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56019 struct scsi_host_template *sht);
56020 extern void ata_host_detach(struct ata_host *host);
56021 extern void ata_host_init(struct ata_host *, struct device *,
56022 - unsigned long, struct ata_port_operations *);
56023 + unsigned long, const struct ata_port_operations *);
56024 extern int ata_scsi_detect(struct scsi_host_template *sht);
56025 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56026 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56027 diff -urNp linux-2.6.32.41/include/linux/lockd/bind.h linux-2.6.32.41/include/linux/lockd/bind.h
56028 --- linux-2.6.32.41/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56029 +++ linux-2.6.32.41/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56030 @@ -23,13 +23,13 @@ struct svc_rqst;
56031 * This is the set of functions for lockd->nfsd communication
56032 */
56033 struct nlmsvc_binding {
56034 - __be32 (*fopen)(struct svc_rqst *,
56035 + __be32 (* const fopen)(struct svc_rqst *,
56036 struct nfs_fh *,
56037 struct file **);
56038 - void (*fclose)(struct file *);
56039 + void (* const fclose)(struct file *);
56040 };
56041
56042 -extern struct nlmsvc_binding * nlmsvc_ops;
56043 +extern const struct nlmsvc_binding * nlmsvc_ops;
56044
56045 /*
56046 * Similar to nfs_client_initdata, but without the NFS-specific
56047 diff -urNp linux-2.6.32.41/include/linux/mm.h linux-2.6.32.41/include/linux/mm.h
56048 --- linux-2.6.32.41/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56049 +++ linux-2.6.32.41/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56050 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56051
56052 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56053 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56054 +
56055 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56056 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56057 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56058 +#else
56059 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56060 +#endif
56061 +
56062 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56063 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56064
56065 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56066 int set_page_dirty_lock(struct page *page);
56067 int clear_page_dirty_for_io(struct page *page);
56068
56069 -/* Is the vma a continuation of the stack vma above it? */
56070 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56071 -{
56072 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56073 -}
56074 -
56075 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56076 unsigned long old_addr, struct vm_area_struct *new_vma,
56077 unsigned long new_addr, unsigned long len);
56078 @@ -890,6 +891,8 @@ struct shrinker {
56079 extern void register_shrinker(struct shrinker *);
56080 extern void unregister_shrinker(struct shrinker *);
56081
56082 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56083 +
56084 int vma_wants_writenotify(struct vm_area_struct *vma);
56085
56086 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56087 @@ -1162,6 +1165,7 @@ out:
56088 }
56089
56090 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56091 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56092
56093 extern unsigned long do_brk(unsigned long, unsigned long);
56094
56095 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56096 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56097 struct vm_area_struct **pprev);
56098
56099 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56100 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56101 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56102 +
56103 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56104 NULL if none. Assume start_addr < end_addr. */
56105 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56106 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56107 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56108 }
56109
56110 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56111 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56112 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56113 unsigned long pfn, unsigned long size, pgprot_t);
56114 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56115 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56116 extern int sysctl_memory_failure_early_kill;
56117 extern int sysctl_memory_failure_recovery;
56118 -extern atomic_long_t mce_bad_pages;
56119 +extern atomic_long_unchecked_t mce_bad_pages;
56120 +
56121 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56122 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56123 +#else
56124 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56125 +#endif
56126
56127 #endif /* __KERNEL__ */
56128 #endif /* _LINUX_MM_H */
56129 diff -urNp linux-2.6.32.41/include/linux/mm_types.h linux-2.6.32.41/include/linux/mm_types.h
56130 --- linux-2.6.32.41/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56131 +++ linux-2.6.32.41/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56132 @@ -186,6 +186,8 @@ struct vm_area_struct {
56133 #ifdef CONFIG_NUMA
56134 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56135 #endif
56136 +
56137 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56138 };
56139
56140 struct core_thread {
56141 @@ -287,6 +289,24 @@ struct mm_struct {
56142 #ifdef CONFIG_MMU_NOTIFIER
56143 struct mmu_notifier_mm *mmu_notifier_mm;
56144 #endif
56145 +
56146 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56147 + unsigned long pax_flags;
56148 +#endif
56149 +
56150 +#ifdef CONFIG_PAX_DLRESOLVE
56151 + unsigned long call_dl_resolve;
56152 +#endif
56153 +
56154 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56155 + unsigned long call_syscall;
56156 +#endif
56157 +
56158 +#ifdef CONFIG_PAX_ASLR
56159 + unsigned long delta_mmap; /* randomized offset */
56160 + unsigned long delta_stack; /* randomized offset */
56161 +#endif
56162 +
56163 };
56164
56165 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56166 diff -urNp linux-2.6.32.41/include/linux/mmu_notifier.h linux-2.6.32.41/include/linux/mmu_notifier.h
56167 --- linux-2.6.32.41/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56168 +++ linux-2.6.32.41/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56169 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56170 */
56171 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56172 ({ \
56173 - pte_t __pte; \
56174 + pte_t ___pte; \
56175 struct vm_area_struct *___vma = __vma; \
56176 unsigned long ___address = __address; \
56177 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56178 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56179 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56180 - __pte; \
56181 + ___pte; \
56182 })
56183
56184 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56185 diff -urNp linux-2.6.32.41/include/linux/mmzone.h linux-2.6.32.41/include/linux/mmzone.h
56186 --- linux-2.6.32.41/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56187 +++ linux-2.6.32.41/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56188 @@ -350,7 +350,7 @@ struct zone {
56189 unsigned long flags; /* zone flags, see below */
56190
56191 /* Zone statistics */
56192 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56193 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56194
56195 /*
56196 * prev_priority holds the scanning priority for this zone. It is
56197 diff -urNp linux-2.6.32.41/include/linux/mod_devicetable.h linux-2.6.32.41/include/linux/mod_devicetable.h
56198 --- linux-2.6.32.41/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56199 +++ linux-2.6.32.41/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56200 @@ -12,7 +12,7 @@
56201 typedef unsigned long kernel_ulong_t;
56202 #endif
56203
56204 -#define PCI_ANY_ID (~0)
56205 +#define PCI_ANY_ID ((__u16)~0)
56206
56207 struct pci_device_id {
56208 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56209 @@ -131,7 +131,7 @@ struct usb_device_id {
56210 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56211 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56212
56213 -#define HID_ANY_ID (~0)
56214 +#define HID_ANY_ID (~0U)
56215
56216 struct hid_device_id {
56217 __u16 bus;
56218 diff -urNp linux-2.6.32.41/include/linux/module.h linux-2.6.32.41/include/linux/module.h
56219 --- linux-2.6.32.41/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56220 +++ linux-2.6.32.41/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56221 @@ -287,16 +287,16 @@ struct module
56222 int (*init)(void);
56223
56224 /* If this is non-NULL, vfree after init() returns */
56225 - void *module_init;
56226 + void *module_init_rx, *module_init_rw;
56227
56228 /* Here is the actual code + data, vfree'd on unload. */
56229 - void *module_core;
56230 + void *module_core_rx, *module_core_rw;
56231
56232 /* Here are the sizes of the init and core sections */
56233 - unsigned int init_size, core_size;
56234 + unsigned int init_size_rw, core_size_rw;
56235
56236 /* The size of the executable code in each section. */
56237 - unsigned int init_text_size, core_text_size;
56238 + unsigned int init_size_rx, core_size_rx;
56239
56240 /* Arch-specific module values */
56241 struct mod_arch_specific arch;
56242 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56243 bool is_module_address(unsigned long addr);
56244 bool is_module_text_address(unsigned long addr);
56245
56246 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56247 +{
56248 +
56249 +#ifdef CONFIG_PAX_KERNEXEC
56250 + if (ktla_ktva(addr) >= (unsigned long)start &&
56251 + ktla_ktva(addr) < (unsigned long)start + size)
56252 + return 1;
56253 +#endif
56254 +
56255 + return ((void *)addr >= start && (void *)addr < start + size);
56256 +}
56257 +
56258 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56259 +{
56260 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56261 +}
56262 +
56263 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56264 +{
56265 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56266 +}
56267 +
56268 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56269 +{
56270 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56271 +}
56272 +
56273 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56274 +{
56275 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56276 +}
56277 +
56278 static inline int within_module_core(unsigned long addr, struct module *mod)
56279 {
56280 - return (unsigned long)mod->module_core <= addr &&
56281 - addr < (unsigned long)mod->module_core + mod->core_size;
56282 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56283 }
56284
56285 static inline int within_module_init(unsigned long addr, struct module *mod)
56286 {
56287 - return (unsigned long)mod->module_init <= addr &&
56288 - addr < (unsigned long)mod->module_init + mod->init_size;
56289 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56290 }
56291
56292 /* Search for module by name: must hold module_mutex. */
56293 diff -urNp linux-2.6.32.41/include/linux/moduleloader.h linux-2.6.32.41/include/linux/moduleloader.h
56294 --- linux-2.6.32.41/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56295 +++ linux-2.6.32.41/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56296 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56297 sections. Returns NULL on failure. */
56298 void *module_alloc(unsigned long size);
56299
56300 +#ifdef CONFIG_PAX_KERNEXEC
56301 +void *module_alloc_exec(unsigned long size);
56302 +#else
56303 +#define module_alloc_exec(x) module_alloc(x)
56304 +#endif
56305 +
56306 /* Free memory returned from module_alloc. */
56307 void module_free(struct module *mod, void *module_region);
56308
56309 +#ifdef CONFIG_PAX_KERNEXEC
56310 +void module_free_exec(struct module *mod, void *module_region);
56311 +#else
56312 +#define module_free_exec(x, y) module_free((x), (y))
56313 +#endif
56314 +
56315 /* Apply the given relocation to the (simplified) ELF. Return -error
56316 or 0. */
56317 int apply_relocate(Elf_Shdr *sechdrs,
56318 diff -urNp linux-2.6.32.41/include/linux/moduleparam.h linux-2.6.32.41/include/linux/moduleparam.h
56319 --- linux-2.6.32.41/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56320 +++ linux-2.6.32.41/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56321 @@ -132,7 +132,7 @@ struct kparam_array
56322
56323 /* Actually copy string: maxlen param is usually sizeof(string). */
56324 #define module_param_string(name, string, len, perm) \
56325 - static const struct kparam_string __param_string_##name \
56326 + static const struct kparam_string __param_string_##name __used \
56327 = { len, string }; \
56328 __module_param_call(MODULE_PARAM_PREFIX, name, \
56329 param_set_copystring, param_get_string, \
56330 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56331
56332 /* Comma-separated array: *nump is set to number they actually specified. */
56333 #define module_param_array_named(name, array, type, nump, perm) \
56334 - static const struct kparam_array __param_arr_##name \
56335 + static const struct kparam_array __param_arr_##name __used \
56336 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56337 sizeof(array[0]), array }; \
56338 __module_param_call(MODULE_PARAM_PREFIX, name, \
56339 diff -urNp linux-2.6.32.41/include/linux/mutex.h linux-2.6.32.41/include/linux/mutex.h
56340 --- linux-2.6.32.41/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56341 +++ linux-2.6.32.41/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56342 @@ -51,7 +51,7 @@ struct mutex {
56343 spinlock_t wait_lock;
56344 struct list_head wait_list;
56345 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56346 - struct thread_info *owner;
56347 + struct task_struct *owner;
56348 #endif
56349 #ifdef CONFIG_DEBUG_MUTEXES
56350 const char *name;
56351 diff -urNp linux-2.6.32.41/include/linux/namei.h linux-2.6.32.41/include/linux/namei.h
56352 --- linux-2.6.32.41/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56353 +++ linux-2.6.32.41/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56354 @@ -22,7 +22,7 @@ struct nameidata {
56355 unsigned int flags;
56356 int last_type;
56357 unsigned depth;
56358 - char *saved_names[MAX_NESTED_LINKS + 1];
56359 + const char *saved_names[MAX_NESTED_LINKS + 1];
56360
56361 /* Intent data */
56362 union {
56363 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56364 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56365 extern void unlock_rename(struct dentry *, struct dentry *);
56366
56367 -static inline void nd_set_link(struct nameidata *nd, char *path)
56368 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56369 {
56370 nd->saved_names[nd->depth] = path;
56371 }
56372
56373 -static inline char *nd_get_link(struct nameidata *nd)
56374 +static inline const char *nd_get_link(const struct nameidata *nd)
56375 {
56376 return nd->saved_names[nd->depth];
56377 }
56378 diff -urNp linux-2.6.32.41/include/linux/netfilter/xt_gradm.h linux-2.6.32.41/include/linux/netfilter/xt_gradm.h
56379 --- linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56380 +++ linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56381 @@ -0,0 +1,9 @@
56382 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56383 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56384 +
56385 +struct xt_gradm_mtinfo {
56386 + __u16 flags;
56387 + __u16 invflags;
56388 +};
56389 +
56390 +#endif
56391 diff -urNp linux-2.6.32.41/include/linux/nodemask.h linux-2.6.32.41/include/linux/nodemask.h
56392 --- linux-2.6.32.41/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56393 +++ linux-2.6.32.41/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56394 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56395
56396 #define any_online_node(mask) \
56397 ({ \
56398 - int node; \
56399 - for_each_node_mask(node, (mask)) \
56400 - if (node_online(node)) \
56401 + int __node; \
56402 + for_each_node_mask(__node, (mask)) \
56403 + if (node_online(__node)) \
56404 break; \
56405 - node; \
56406 + __node; \
56407 })
56408
56409 #define num_online_nodes() num_node_state(N_ONLINE)
56410 diff -urNp linux-2.6.32.41/include/linux/oprofile.h linux-2.6.32.41/include/linux/oprofile.h
56411 --- linux-2.6.32.41/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56412 +++ linux-2.6.32.41/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56413 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56414 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56415 char const * name, ulong * val);
56416
56417 -/** Create a file for read-only access to an atomic_t. */
56418 +/** Create a file for read-only access to an atomic_unchecked_t. */
56419 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56420 - char const * name, atomic_t * val);
56421 + char const * name, atomic_unchecked_t * val);
56422
56423 /** create a directory */
56424 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56425 diff -urNp linux-2.6.32.41/include/linux/perf_event.h linux-2.6.32.41/include/linux/perf_event.h
56426 --- linux-2.6.32.41/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56427 +++ linux-2.6.32.41/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56428 @@ -476,7 +476,7 @@ struct hw_perf_event {
56429 struct hrtimer hrtimer;
56430 };
56431 };
56432 - atomic64_t prev_count;
56433 + atomic64_unchecked_t prev_count;
56434 u64 sample_period;
56435 u64 last_period;
56436 atomic64_t period_left;
56437 @@ -557,7 +557,7 @@ struct perf_event {
56438 const struct pmu *pmu;
56439
56440 enum perf_event_active_state state;
56441 - atomic64_t count;
56442 + atomic64_unchecked_t count;
56443
56444 /*
56445 * These are the total time in nanoseconds that the event
56446 @@ -595,8 +595,8 @@ struct perf_event {
56447 * These accumulate total time (in nanoseconds) that children
56448 * events have been enabled and running, respectively.
56449 */
56450 - atomic64_t child_total_time_enabled;
56451 - atomic64_t child_total_time_running;
56452 + atomic64_unchecked_t child_total_time_enabled;
56453 + atomic64_unchecked_t child_total_time_running;
56454
56455 /*
56456 * Protect attach/detach and child_list:
56457 diff -urNp linux-2.6.32.41/include/linux/pipe_fs_i.h linux-2.6.32.41/include/linux/pipe_fs_i.h
56458 --- linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56459 +++ linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56460 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56461 wait_queue_head_t wait;
56462 unsigned int nrbufs, curbuf;
56463 struct page *tmp_page;
56464 - unsigned int readers;
56465 - unsigned int writers;
56466 - unsigned int waiting_writers;
56467 + atomic_t readers;
56468 + atomic_t writers;
56469 + atomic_t waiting_writers;
56470 unsigned int r_counter;
56471 unsigned int w_counter;
56472 struct fasync_struct *fasync_readers;
56473 diff -urNp linux-2.6.32.41/include/linux/poison.h linux-2.6.32.41/include/linux/poison.h
56474 --- linux-2.6.32.41/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56475 +++ linux-2.6.32.41/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56476 @@ -19,8 +19,8 @@
56477 * under normal circumstances, used to verify that nobody uses
56478 * non-initialized list entries.
56479 */
56480 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56481 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56482 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56483 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56484
56485 /********** include/linux/timer.h **********/
56486 /*
56487 diff -urNp linux-2.6.32.41/include/linux/proc_fs.h linux-2.6.32.41/include/linux/proc_fs.h
56488 --- linux-2.6.32.41/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56489 +++ linux-2.6.32.41/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56490 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56491 return proc_create_data(name, mode, parent, proc_fops, NULL);
56492 }
56493
56494 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56495 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56496 +{
56497 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56498 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56499 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56500 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56501 +#else
56502 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56503 +#endif
56504 +}
56505 +
56506 +
56507 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56508 mode_t mode, struct proc_dir_entry *base,
56509 read_proc_t *read_proc, void * data)
56510 diff -urNp linux-2.6.32.41/include/linux/ptrace.h linux-2.6.32.41/include/linux/ptrace.h
56511 --- linux-2.6.32.41/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56512 +++ linux-2.6.32.41/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56513 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56514 extern void exit_ptrace(struct task_struct *tracer);
56515 #define PTRACE_MODE_READ 1
56516 #define PTRACE_MODE_ATTACH 2
56517 -/* Returns 0 on success, -errno on denial. */
56518 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56519 /* Returns true on success, false on denial. */
56520 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56521 +/* Returns true on success, false on denial. */
56522 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56523
56524 static inline int ptrace_reparented(struct task_struct *child)
56525 {
56526 diff -urNp linux-2.6.32.41/include/linux/random.h linux-2.6.32.41/include/linux/random.h
56527 --- linux-2.6.32.41/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56528 +++ linux-2.6.32.41/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56529 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56530 u32 random32(void);
56531 void srandom32(u32 seed);
56532
56533 +static inline unsigned long pax_get_random_long(void)
56534 +{
56535 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56536 +}
56537 +
56538 #endif /* __KERNEL___ */
56539
56540 #endif /* _LINUX_RANDOM_H */
56541 diff -urNp linux-2.6.32.41/include/linux/reboot.h linux-2.6.32.41/include/linux/reboot.h
56542 --- linux-2.6.32.41/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56543 +++ linux-2.6.32.41/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56544 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56545 * Architecture-specific implementations of sys_reboot commands.
56546 */
56547
56548 -extern void machine_restart(char *cmd);
56549 -extern void machine_halt(void);
56550 -extern void machine_power_off(void);
56551 +extern void machine_restart(char *cmd) __noreturn;
56552 +extern void machine_halt(void) __noreturn;
56553 +extern void machine_power_off(void) __noreturn;
56554
56555 extern void machine_shutdown(void);
56556 struct pt_regs;
56557 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56558 */
56559
56560 extern void kernel_restart_prepare(char *cmd);
56561 -extern void kernel_restart(char *cmd);
56562 -extern void kernel_halt(void);
56563 -extern void kernel_power_off(void);
56564 +extern void kernel_restart(char *cmd) __noreturn;
56565 +extern void kernel_halt(void) __noreturn;
56566 +extern void kernel_power_off(void) __noreturn;
56567
56568 void ctrl_alt_del(void);
56569
56570 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56571 * Emergency restart, callable from an interrupt handler.
56572 */
56573
56574 -extern void emergency_restart(void);
56575 +extern void emergency_restart(void) __noreturn;
56576 #include <asm/emergency-restart.h>
56577
56578 #endif
56579 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs.h linux-2.6.32.41/include/linux/reiserfs_fs.h
56580 --- linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56581 +++ linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56582 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56583 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56584
56585 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56586 -#define get_generation(s) atomic_read (&fs_generation(s))
56587 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56588 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56589 #define __fs_changed(gen,s) (gen != get_generation (s))
56590 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56591 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56592 */
56593
56594 struct item_operations {
56595 - int (*bytes_number) (struct item_head * ih, int block_size);
56596 - void (*decrement_key) (struct cpu_key *);
56597 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56598 + int (* const bytes_number) (struct item_head * ih, int block_size);
56599 + void (* const decrement_key) (struct cpu_key *);
56600 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56601 unsigned long bsize);
56602 - void (*print_item) (struct item_head *, char *item);
56603 - void (*check_item) (struct item_head *, char *item);
56604 + void (* const print_item) (struct item_head *, char *item);
56605 + void (* const check_item) (struct item_head *, char *item);
56606
56607 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56608 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56609 int is_affected, int insert_size);
56610 - int (*check_left) (struct virtual_item * vi, int free,
56611 + int (* const check_left) (struct virtual_item * vi, int free,
56612 int start_skip, int end_skip);
56613 - int (*check_right) (struct virtual_item * vi, int free);
56614 - int (*part_size) (struct virtual_item * vi, int from, int to);
56615 - int (*unit_num) (struct virtual_item * vi);
56616 - void (*print_vi) (struct virtual_item * vi);
56617 + int (* const check_right) (struct virtual_item * vi, int free);
56618 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56619 + int (* const unit_num) (struct virtual_item * vi);
56620 + void (* const print_vi) (struct virtual_item * vi);
56621 };
56622
56623 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56624 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56625
56626 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56627 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56628 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs_sb.h linux-2.6.32.41/include/linux/reiserfs_fs_sb.h
56629 --- linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56630 +++ linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56631 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56632 /* Comment? -Hans */
56633 wait_queue_head_t s_wait;
56634 /* To be obsoleted soon by per buffer seals.. -Hans */
56635 - atomic_t s_generation_counter; // increased by one every time the
56636 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56637 // tree gets re-balanced
56638 unsigned long s_properties; /* File system properties. Currently holds
56639 on-disk FS format */
56640 diff -urNp linux-2.6.32.41/include/linux/sched.h linux-2.6.32.41/include/linux/sched.h
56641 --- linux-2.6.32.41/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56642 +++ linux-2.6.32.41/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
56643 @@ -101,6 +101,7 @@ struct bio;
56644 struct fs_struct;
56645 struct bts_context;
56646 struct perf_event_context;
56647 +struct linux_binprm;
56648
56649 /*
56650 * List of flags we want to share for kernel threads,
56651 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56652 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56653 asmlinkage void __schedule(void);
56654 asmlinkage void schedule(void);
56655 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56656 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56657
56658 struct nsproxy;
56659 struct user_namespace;
56660 @@ -371,9 +372,12 @@ struct user_namespace;
56661 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56662
56663 extern int sysctl_max_map_count;
56664 +extern unsigned long sysctl_heap_stack_gap;
56665
56666 #include <linux/aio.h>
56667
56668 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56669 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56670 extern unsigned long
56671 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56672 unsigned long, unsigned long);
56673 @@ -666,6 +670,16 @@ struct signal_struct {
56674 struct tty_audit_buf *tty_audit_buf;
56675 #endif
56676
56677 +#ifdef CONFIG_GRKERNSEC
56678 + u32 curr_ip;
56679 + u32 saved_ip;
56680 + u32 gr_saddr;
56681 + u32 gr_daddr;
56682 + u16 gr_sport;
56683 + u16 gr_dport;
56684 + u8 used_accept:1;
56685 +#endif
56686 +
56687 int oom_adj; /* OOM kill score adjustment (bit shift) */
56688 };
56689
56690 @@ -723,6 +737,11 @@ struct user_struct {
56691 struct key *session_keyring; /* UID's default session keyring */
56692 #endif
56693
56694 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56695 + unsigned int banned;
56696 + unsigned long ban_expires;
56697 +#endif
56698 +
56699 /* Hash table maintenance information */
56700 struct hlist_node uidhash_node;
56701 uid_t uid;
56702 @@ -1328,8 +1347,8 @@ struct task_struct {
56703 struct list_head thread_group;
56704
56705 struct completion *vfork_done; /* for vfork() */
56706 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56707 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56708 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56709 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56710
56711 cputime_t utime, stime, utimescaled, stimescaled;
56712 cputime_t gtime;
56713 @@ -1343,16 +1362,6 @@ struct task_struct {
56714 struct task_cputime cputime_expires;
56715 struct list_head cpu_timers[3];
56716
56717 -/* process credentials */
56718 - const struct cred *real_cred; /* objective and real subjective task
56719 - * credentials (COW) */
56720 - const struct cred *cred; /* effective (overridable) subjective task
56721 - * credentials (COW) */
56722 - struct mutex cred_guard_mutex; /* guard against foreign influences on
56723 - * credential calculations
56724 - * (notably. ptrace) */
56725 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56726 -
56727 char comm[TASK_COMM_LEN]; /* executable name excluding path
56728 - access with [gs]et_task_comm (which lock
56729 it with task_lock())
56730 @@ -1369,6 +1378,10 @@ struct task_struct {
56731 #endif
56732 /* CPU-specific state of this task */
56733 struct thread_struct thread;
56734 +/* thread_info moved to task_struct */
56735 +#ifdef CONFIG_X86
56736 + struct thread_info tinfo;
56737 +#endif
56738 /* filesystem information */
56739 struct fs_struct *fs;
56740 /* open file information */
56741 @@ -1436,6 +1449,15 @@ struct task_struct {
56742 int hardirq_context;
56743 int softirq_context;
56744 #endif
56745 +
56746 +/* process credentials */
56747 + const struct cred *real_cred; /* objective and real subjective task
56748 + * credentials (COW) */
56749 + struct mutex cred_guard_mutex; /* guard against foreign influences on
56750 + * credential calculations
56751 + * (notably. ptrace) */
56752 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56753 +
56754 #ifdef CONFIG_LOCKDEP
56755 # define MAX_LOCK_DEPTH 48UL
56756 u64 curr_chain_key;
56757 @@ -1456,6 +1478,9 @@ struct task_struct {
56758
56759 struct backing_dev_info *backing_dev_info;
56760
56761 + const struct cred *cred; /* effective (overridable) subjective task
56762 + * credentials (COW) */
56763 +
56764 struct io_context *io_context;
56765
56766 unsigned long ptrace_message;
56767 @@ -1519,6 +1544,21 @@ struct task_struct {
56768 unsigned long default_timer_slack_ns;
56769
56770 struct list_head *scm_work_list;
56771 +
56772 +#ifdef CONFIG_GRKERNSEC
56773 + /* grsecurity */
56774 + struct dentry *gr_chroot_dentry;
56775 + struct acl_subject_label *acl;
56776 + struct acl_role_label *role;
56777 + struct file *exec_file;
56778 + u16 acl_role_id;
56779 + /* is this the task that authenticated to the special role */
56780 + u8 acl_sp_role;
56781 + u8 is_writable;
56782 + u8 brute;
56783 + u8 gr_is_chrooted;
56784 +#endif
56785 +
56786 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56787 /* Index of current stored adress in ret_stack */
56788 int curr_ret_stack;
56789 @@ -1542,6 +1582,57 @@ struct task_struct {
56790 #endif /* CONFIG_TRACING */
56791 };
56792
56793 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56794 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56795 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56796 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56797 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56798 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56799 +
56800 +#ifdef CONFIG_PAX_SOFTMODE
56801 +extern unsigned int pax_softmode;
56802 +#endif
56803 +
56804 +extern int pax_check_flags(unsigned long *);
56805 +
56806 +/* if tsk != current then task_lock must be held on it */
56807 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56808 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
56809 +{
56810 + if (likely(tsk->mm))
56811 + return tsk->mm->pax_flags;
56812 + else
56813 + return 0UL;
56814 +}
56815 +
56816 +/* if tsk != current then task_lock must be held on it */
56817 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56818 +{
56819 + if (likely(tsk->mm)) {
56820 + tsk->mm->pax_flags = flags;
56821 + return 0;
56822 + }
56823 + return -EINVAL;
56824 +}
56825 +#endif
56826 +
56827 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56828 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
56829 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56830 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56831 +#endif
56832 +
56833 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56834 +void pax_report_insns(void *pc, void *sp);
56835 +void pax_report_refcount_overflow(struct pt_regs *regs);
56836 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
56837 +
56838 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56839 +extern void pax_track_stack(void);
56840 +#else
56841 +static inline void pax_track_stack(void) {}
56842 +#endif
56843 +
56844 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56845 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
56846
56847 @@ -1978,7 +2069,9 @@ void yield(void);
56848 extern struct exec_domain default_exec_domain;
56849
56850 union thread_union {
56851 +#ifndef CONFIG_X86
56852 struct thread_info thread_info;
56853 +#endif
56854 unsigned long stack[THREAD_SIZE/sizeof(long)];
56855 };
56856
56857 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
56858 extern void exit_itimers(struct signal_struct *);
56859 extern void flush_itimer_signals(void);
56860
56861 -extern NORET_TYPE void do_group_exit(int);
56862 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56863
56864 extern void daemonize(const char *, ...);
56865 extern int allow_signal(int);
56866 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
56867
56868 #endif
56869
56870 -static inline int object_is_on_stack(void *obj)
56871 +static inline int object_starts_on_stack(void *obj)
56872 {
56873 - void *stack = task_stack_page(current);
56874 + const void *stack = task_stack_page(current);
56875
56876 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56877 }
56878
56879 +#ifdef CONFIG_PAX_USERCOPY
56880 +extern int object_is_on_stack(const void *obj, unsigned long len);
56881 +#endif
56882 +
56883 extern void thread_info_cache_init(void);
56884
56885 #ifdef CONFIG_DEBUG_STACK_USAGE
56886 diff -urNp linux-2.6.32.41/include/linux/screen_info.h linux-2.6.32.41/include/linux/screen_info.h
56887 --- linux-2.6.32.41/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
56888 +++ linux-2.6.32.41/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
56889 @@ -42,7 +42,8 @@ struct screen_info {
56890 __u16 pages; /* 0x32 */
56891 __u16 vesa_attributes; /* 0x34 */
56892 __u32 capabilities; /* 0x36 */
56893 - __u8 _reserved[6]; /* 0x3a */
56894 + __u16 vesapm_size; /* 0x3a */
56895 + __u8 _reserved[4]; /* 0x3c */
56896 } __attribute__((packed));
56897
56898 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
56899 diff -urNp linux-2.6.32.41/include/linux/security.h linux-2.6.32.41/include/linux/security.h
56900 --- linux-2.6.32.41/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
56901 +++ linux-2.6.32.41/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
56902 @@ -34,6 +34,7 @@
56903 #include <linux/key.h>
56904 #include <linux/xfrm.h>
56905 #include <linux/gfp.h>
56906 +#include <linux/grsecurity.h>
56907 #include <net/flow.h>
56908
56909 /* Maximum number of letters for an LSM name string */
56910 diff -urNp linux-2.6.32.41/include/linux/shm.h linux-2.6.32.41/include/linux/shm.h
56911 --- linux-2.6.32.41/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
56912 +++ linux-2.6.32.41/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
56913 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
56914 pid_t shm_cprid;
56915 pid_t shm_lprid;
56916 struct user_struct *mlock_user;
56917 +#ifdef CONFIG_GRKERNSEC
56918 + time_t shm_createtime;
56919 + pid_t shm_lapid;
56920 +#endif
56921 };
56922
56923 /* shm_mode upper byte flags */
56924 diff -urNp linux-2.6.32.41/include/linux/skbuff.h linux-2.6.32.41/include/linux/skbuff.h
56925 --- linux-2.6.32.41/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
56926 +++ linux-2.6.32.41/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
56927 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
56928 */
56929 static inline int skb_queue_empty(const struct sk_buff_head *list)
56930 {
56931 - return list->next == (struct sk_buff *)list;
56932 + return list->next == (const struct sk_buff *)list;
56933 }
56934
56935 /**
56936 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
56937 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
56938 const struct sk_buff *skb)
56939 {
56940 - return (skb->next == (struct sk_buff *) list);
56941 + return (skb->next == (const struct sk_buff *) list);
56942 }
56943
56944 /**
56945 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
56946 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
56947 const struct sk_buff *skb)
56948 {
56949 - return (skb->prev == (struct sk_buff *) list);
56950 + return (skb->prev == (const struct sk_buff *) list);
56951 }
56952
56953 /**
56954 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
56955 * headroom, you should not reduce this.
56956 */
56957 #ifndef NET_SKB_PAD
56958 -#define NET_SKB_PAD 32
56959 +#define NET_SKB_PAD (_AC(32,U))
56960 #endif
56961
56962 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
56963 diff -urNp linux-2.6.32.41/include/linux/slab_def.h linux-2.6.32.41/include/linux/slab_def.h
56964 --- linux-2.6.32.41/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
56965 +++ linux-2.6.32.41/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
56966 @@ -69,10 +69,10 @@ struct kmem_cache {
56967 unsigned long node_allocs;
56968 unsigned long node_frees;
56969 unsigned long node_overflow;
56970 - atomic_t allochit;
56971 - atomic_t allocmiss;
56972 - atomic_t freehit;
56973 - atomic_t freemiss;
56974 + atomic_unchecked_t allochit;
56975 + atomic_unchecked_t allocmiss;
56976 + atomic_unchecked_t freehit;
56977 + atomic_unchecked_t freemiss;
56978
56979 /*
56980 * If debugging is enabled, then the allocator can add additional
56981 diff -urNp linux-2.6.32.41/include/linux/slab.h linux-2.6.32.41/include/linux/slab.h
56982 --- linux-2.6.32.41/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
56983 +++ linux-2.6.32.41/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
56984 @@ -11,12 +11,20 @@
56985
56986 #include <linux/gfp.h>
56987 #include <linux/types.h>
56988 +#include <linux/err.h>
56989
56990 /*
56991 * Flags to pass to kmem_cache_create().
56992 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
56993 */
56994 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
56995 +
56996 +#ifdef CONFIG_PAX_USERCOPY
56997 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
56998 +#else
56999 +#define SLAB_USERCOPY 0x00000000UL
57000 +#endif
57001 +
57002 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57003 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57004 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57005 @@ -82,10 +90,13 @@
57006 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57007 * Both make kfree a no-op.
57008 */
57009 -#define ZERO_SIZE_PTR ((void *)16)
57010 +#define ZERO_SIZE_PTR \
57011 +({ \
57012 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57013 + (void *)(-MAX_ERRNO-1L); \
57014 +})
57015
57016 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57017 - (unsigned long)ZERO_SIZE_PTR)
57018 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57019
57020 /*
57021 * struct kmem_cache related prototypes
57022 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57023 void kfree(const void *);
57024 void kzfree(const void *);
57025 size_t ksize(const void *);
57026 +void check_object_size(const void *ptr, unsigned long n, bool to);
57027
57028 /*
57029 * Allocator specific definitions. These are mainly used to establish optimized
57030 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57031
57032 void __init kmem_cache_init_late(void);
57033
57034 +#define kmalloc(x, y) \
57035 +({ \
57036 + void *___retval; \
57037 + intoverflow_t ___x = (intoverflow_t)x; \
57038 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57039 + ___retval = NULL; \
57040 + else \
57041 + ___retval = kmalloc((size_t)___x, (y)); \
57042 + ___retval; \
57043 +})
57044 +
57045 +#define kmalloc_node(x, y, z) \
57046 +({ \
57047 + void *___retval; \
57048 + intoverflow_t ___x = (intoverflow_t)x; \
57049 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57050 + ___retval = NULL; \
57051 + else \
57052 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57053 + ___retval; \
57054 +})
57055 +
57056 +#define kzalloc(x, y) \
57057 +({ \
57058 + void *___retval; \
57059 + intoverflow_t ___x = (intoverflow_t)x; \
57060 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57061 + ___retval = NULL; \
57062 + else \
57063 + ___retval = kzalloc((size_t)___x, (y)); \
57064 + ___retval; \
57065 +})
57066 +
57067 #endif /* _LINUX_SLAB_H */
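Besides adding SLAB_USERCOPY and the check_object_size() prototype, this slab.h diff wraps the kmalloc family in a size range check performed in the wider intoverflow_t type (the same guard reappears for the vmalloc family further down), and it moves ZERO_SIZE_PTR from (void *)16 to just below the ERR_PTR() range so that the rewritten ZERO_OR_NULL_PTR() catches NULL, the zero-size sentinel and error pointers with a single unsigned comparison. A small userspace check of that comparison; MAX_ERRNO is 4095 in the kernel, and main() plus the sample pointers are illustrative only:

#include <stdio.h>

#define MAX_ERRNO       4095L
#define ZERO_SIZE_PTR   ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
    int object;

    printf("NULL          -> %d\n", ZERO_OR_NULL_PTR(NULL));          /* 1 */
    printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
    printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR((void *)-12L));  /* 1 */
    printf("real object   -> %d\n", ZERO_OR_NULL_PTR(&object));       /* 0 */
    return 0;
}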
57068 diff -urNp linux-2.6.32.41/include/linux/slub_def.h linux-2.6.32.41/include/linux/slub_def.h
57069 --- linux-2.6.32.41/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57070 +++ linux-2.6.32.41/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57071 @@ -86,7 +86,7 @@ struct kmem_cache {
57072 struct kmem_cache_order_objects max;
57073 struct kmem_cache_order_objects min;
57074 gfp_t allocflags; /* gfp flags to use on each alloc */
57075 - int refcount; /* Refcount for slab cache destroy */
57076 + atomic_t refcount; /* Refcount for slab cache destroy */
57077 void (*ctor)(void *);
57078 int inuse; /* Offset to metadata */
57079 int align; /* Alignment */
57080 diff -urNp linux-2.6.32.41/include/linux/sonet.h linux-2.6.32.41/include/linux/sonet.h
57081 --- linux-2.6.32.41/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57082 +++ linux-2.6.32.41/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57083 @@ -61,7 +61,7 @@ struct sonet_stats {
57084 #include <asm/atomic.h>
57085
57086 struct k_sonet_stats {
57087 -#define __HANDLE_ITEM(i) atomic_t i
57088 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57089 __SONET_ITEMS
57090 #undef __HANDLE_ITEM
57091 };
57092 diff -urNp linux-2.6.32.41/include/linux/sunrpc/clnt.h linux-2.6.32.41/include/linux/sunrpc/clnt.h
57093 --- linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57094 +++ linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57095 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57096 {
57097 switch (sap->sa_family) {
57098 case AF_INET:
57099 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57100 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57101 case AF_INET6:
57102 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57103 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57104 }
57105 return 0;
57106 }
57107 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57108 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57109 const struct sockaddr *src)
57110 {
57111 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57112 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57113 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57114
57115 dsin->sin_family = ssin->sin_family;
57116 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57117 if (sa->sa_family != AF_INET6)
57118 return 0;
57119
57120 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57121 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57122 }
57123
57124 #endif /* __KERNEL__ */
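The clnt.h changes are pure const-correctness: the helpers already take a const struct sockaddr * and only needed their internal casts to stop discarding the qualifier. The dispatch-on-sa_family pattern itself translates directly to userspace; get_port() and the test address below are illustrative, not kernel code:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static unsigned short get_port(const struct sockaddr *sap)
{
    switch (sap->sa_family) {
    case AF_INET:
        return ntohs(((const struct sockaddr_in *)sap)->sin_port);
    case AF_INET6:
        return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
    }
    return 0;
}

int main(void)
{
    struct sockaddr_in sin;

    memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    sin.sin_port = htons(2049);

    printf("port: %u\n", get_port((const struct sockaddr *)&sin)); /* 2049 */
    return 0;
}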
57125 diff -urNp linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h
57126 --- linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57127 +++ linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57128 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57129 extern unsigned int svcrdma_max_requests;
57130 extern unsigned int svcrdma_max_req_size;
57131
57132 -extern atomic_t rdma_stat_recv;
57133 -extern atomic_t rdma_stat_read;
57134 -extern atomic_t rdma_stat_write;
57135 -extern atomic_t rdma_stat_sq_starve;
57136 -extern atomic_t rdma_stat_rq_starve;
57137 -extern atomic_t rdma_stat_rq_poll;
57138 -extern atomic_t rdma_stat_rq_prod;
57139 -extern atomic_t rdma_stat_sq_poll;
57140 -extern atomic_t rdma_stat_sq_prod;
57141 +extern atomic_unchecked_t rdma_stat_recv;
57142 +extern atomic_unchecked_t rdma_stat_read;
57143 +extern atomic_unchecked_t rdma_stat_write;
57144 +extern atomic_unchecked_t rdma_stat_sq_starve;
57145 +extern atomic_unchecked_t rdma_stat_rq_starve;
57146 +extern atomic_unchecked_t rdma_stat_rq_poll;
57147 +extern atomic_unchecked_t rdma_stat_rq_prod;
57148 +extern atomic_unchecked_t rdma_stat_sq_poll;
57149 +extern atomic_unchecked_t rdma_stat_sq_prod;
57150
57151 #define RPCRDMA_VERSION 1
57152
57153 diff -urNp linux-2.6.32.41/include/linux/suspend.h linux-2.6.32.41/include/linux/suspend.h
57154 --- linux-2.6.32.41/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57155 +++ linux-2.6.32.41/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57156 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57157 * which require special recovery actions in that situation.
57158 */
57159 struct platform_suspend_ops {
57160 - int (*valid)(suspend_state_t state);
57161 - int (*begin)(suspend_state_t state);
57162 - int (*prepare)(void);
57163 - int (*prepare_late)(void);
57164 - int (*enter)(suspend_state_t state);
57165 - void (*wake)(void);
57166 - void (*finish)(void);
57167 - void (*end)(void);
57168 - void (*recover)(void);
57169 + int (* const valid)(suspend_state_t state);
57170 + int (* const begin)(suspend_state_t state);
57171 + int (* const prepare)(void);
57172 + int (* const prepare_late)(void);
57173 + int (* const enter)(suspend_state_t state);
57174 + void (* const wake)(void);
57175 + void (* const finish)(void);
57176 + void (* const end)(void);
57177 + void (* const recover)(void);
57178 };
57179
57180 #ifdef CONFIG_SUSPEND
57181 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57182 * suspend_set_ops - set platform dependent suspend operations
57183 * @ops: The new suspend operations to set.
57184 */
57185 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57186 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57187 extern int suspend_valid_only_mem(suspend_state_t state);
57188
57189 /**
57190 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57191 #else /* !CONFIG_SUSPEND */
57192 #define suspend_valid_only_mem NULL
57193
57194 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57195 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57196 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57197 #endif /* !CONFIG_SUSPEND */
57198
57199 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57200 * platforms which require special recovery actions in that situation.
57201 */
57202 struct platform_hibernation_ops {
57203 - int (*begin)(void);
57204 - void (*end)(void);
57205 - int (*pre_snapshot)(void);
57206 - void (*finish)(void);
57207 - int (*prepare)(void);
57208 - int (*enter)(void);
57209 - void (*leave)(void);
57210 - int (*pre_restore)(void);
57211 - void (*restore_cleanup)(void);
57212 - void (*recover)(void);
57213 + int (* const begin)(void);
57214 + void (* const end)(void);
57215 + int (* const pre_snapshot)(void);
57216 + void (* const finish)(void);
57217 + int (* const prepare)(void);
57218 + int (* const enter)(void);
57219 + void (* const leave)(void);
57220 + int (* const pre_restore)(void);
57221 + void (* const restore_cleanup)(void);
57222 + void (* const recover)(void);
57223 };
57224
57225 #ifdef CONFIG_HIBERNATION
57226 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57227 extern void swsusp_unset_page_free(struct page *);
57228 extern unsigned long get_safe_page(gfp_t gfp_mask);
57229
57230 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57231 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57232 extern int hibernate(void);
57233 extern bool system_entering_hibernation(void);
57234 #else /* CONFIG_HIBERNATION */
57235 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57236 static inline void swsusp_set_page_free(struct page *p) {}
57237 static inline void swsusp_unset_page_free(struct page *p) {}
57238
57239 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57240 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57241 static inline int hibernate(void) { return -ENOSYS; }
57242 static inline bool system_entering_hibernation(void) { return false; }
57243 #endif /* CONFIG_HIBERNATION */
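As in many other hunks in this patch (sysfs_ops, neigh_ops and snd_ac97_build_ops below), the suspend and hibernation ops gain '* const' function-pointer members and the registration functions take a const pointer, so an ops instance can live in read-only data and its callbacks cannot be re-pointed at runtime. A userspace sketch of the pattern; struct demo_ops and the demo_* names are made up for illustration:

#include <stdio.h>

struct demo_ops {
    int  (* const prepare)(void);
    void (* const finish)(void);
};

static int demo_prepare(void) { puts("prepare"); return 0; }
static void demo_finish(void) { puts("finish"); }

/* Ends up in read-only data; the members can only be set at initialization. */
static const struct demo_ops demo_ops = {
    .prepare = demo_prepare,
    .finish  = demo_finish,
};

static const struct demo_ops *registered_ops;

/* Mirrors suspend_set_ops(const struct platform_suspend_ops *ops). */
static void demo_set_ops(const struct demo_ops *ops)
{
    registered_ops = ops;
}

int main(void)
{
    demo_set_ops(&demo_ops);
    if (registered_ops->prepare() == 0)
        registered_ops->finish();
    return 0;
}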
57244 diff -urNp linux-2.6.32.41/include/linux/sysctl.h linux-2.6.32.41/include/linux/sysctl.h
57245 --- linux-2.6.32.41/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57246 +++ linux-2.6.32.41/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57247 @@ -164,7 +164,11 @@ enum
57248 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57249 };
57250
57251 -
57252 +#ifdef CONFIG_PAX_SOFTMODE
57253 +enum {
57254 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57255 +};
57256 +#endif
57257
57258 /* CTL_VM names: */
57259 enum
57260 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57261
57262 extern int proc_dostring(struct ctl_table *, int,
57263 void __user *, size_t *, loff_t *);
57264 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57265 + void __user *, size_t *, loff_t *);
57266 extern int proc_dointvec(struct ctl_table *, int,
57267 void __user *, size_t *, loff_t *);
57268 extern int proc_dointvec_minmax(struct ctl_table *, int,
57269 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57270
57271 extern ctl_handler sysctl_data;
57272 extern ctl_handler sysctl_string;
57273 +extern ctl_handler sysctl_string_modpriv;
57274 extern ctl_handler sysctl_intvec;
57275 extern ctl_handler sysctl_jiffies;
57276 extern ctl_handler sysctl_ms_jiffies;
57277 diff -urNp linux-2.6.32.41/include/linux/sysfs.h linux-2.6.32.41/include/linux/sysfs.h
57278 --- linux-2.6.32.41/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57279 +++ linux-2.6.32.41/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57280 @@ -75,8 +75,8 @@ struct bin_attribute {
57281 };
57282
57283 struct sysfs_ops {
57284 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57285 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57286 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57287 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57288 };
57289
57290 struct sysfs_dirent;
57291 diff -urNp linux-2.6.32.41/include/linux/thread_info.h linux-2.6.32.41/include/linux/thread_info.h
57292 --- linux-2.6.32.41/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57293 +++ linux-2.6.32.41/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57294 @@ -23,7 +23,7 @@ struct restart_block {
57295 };
57296 /* For futex_wait and futex_wait_requeue_pi */
57297 struct {
57298 - u32 *uaddr;
57299 + u32 __user *uaddr;
57300 u32 val;
57301 u32 flags;
57302 u32 bitset;
57303 diff -urNp linux-2.6.32.41/include/linux/tty.h linux-2.6.32.41/include/linux/tty.h
57304 --- linux-2.6.32.41/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57305 +++ linux-2.6.32.41/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57306 @@ -13,6 +13,7 @@
57307 #include <linux/tty_driver.h>
57308 #include <linux/tty_ldisc.h>
57309 #include <linux/mutex.h>
57310 +#include <linux/poll.h>
57311
57312 #include <asm/system.h>
57313
57314 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57315 extern dev_t tty_devnum(struct tty_struct *tty);
57316 extern void proc_clear_tty(struct task_struct *p);
57317 extern struct tty_struct *get_current_tty(void);
57318 -extern void tty_default_fops(struct file_operations *fops);
57319 extern struct tty_struct *alloc_tty_struct(void);
57320 extern void free_tty_struct(struct tty_struct *tty);
57321 extern void initialize_tty_struct(struct tty_struct *tty,
57322 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57323 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57324 extern void tty_ldisc_enable(struct tty_struct *tty);
57325
57326 +/* tty_io.c */
57327 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57328 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57329 +extern unsigned int tty_poll(struct file *, poll_table *);
57330 +#ifdef CONFIG_COMPAT
57331 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57332 + unsigned long arg);
57333 +#else
57334 +#define tty_compat_ioctl NULL
57335 +#endif
57336 +extern int tty_release(struct inode *, struct file *);
57337 +extern int tty_fasync(int fd, struct file *filp, int on);
57338
57339 /* n_tty.c */
57340 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57341 diff -urNp linux-2.6.32.41/include/linux/tty_ldisc.h linux-2.6.32.41/include/linux/tty_ldisc.h
57342 --- linux-2.6.32.41/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57343 +++ linux-2.6.32.41/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57344 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57345
57346 struct module *owner;
57347
57348 - int refcount;
57349 + atomic_t refcount;
57350 };
57351
57352 struct tty_ldisc {
57353 diff -urNp linux-2.6.32.41/include/linux/types.h linux-2.6.32.41/include/linux/types.h
57354 --- linux-2.6.32.41/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57355 +++ linux-2.6.32.41/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57356 @@ -191,10 +191,26 @@ typedef struct {
57357 volatile int counter;
57358 } atomic_t;
57359
57360 +#ifdef CONFIG_PAX_REFCOUNT
57361 +typedef struct {
57362 + volatile int counter;
57363 +} atomic_unchecked_t;
57364 +#else
57365 +typedef atomic_t atomic_unchecked_t;
57366 +#endif
57367 +
57368 #ifdef CONFIG_64BIT
57369 typedef struct {
57370 volatile long counter;
57371 } atomic64_t;
57372 +
57373 +#ifdef CONFIG_PAX_REFCOUNT
57374 +typedef struct {
57375 + volatile long counter;
57376 +} atomic64_unchecked_t;
57377 +#else
57378 +typedef atomic64_t atomic64_unchecked_t;
57379 +#endif
57380 #endif
57381
57382 struct ustat {
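This types.h hunk introduces the central type of the PAX_REFCOUNT side of the patch: atomic_unchecked_t is laid out exactly like atomic_t, but only atomic_t operations receive overflow detection, so counters that are allowed to wrap (the slab hit/miss statistics, sk_drops, the rdma_stat_* counters and so on above) are switched to the unchecked variant. The sketch below only illustrates the semantic split; it is userspace code, it is not actually atomic, and it is not how PaX implements the check (that is done in the architecture's atomic primitives):

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Reference counter: an increment that would overflow is refused. */
static int atomic_inc_checked(atomic_t *v)
{
    int old = v->counter, new;

    if (__builtin_add_overflow(old, 1, &new))
        return -1;              /* would wrap: report instead of corrupting */
    v->counter = new;
    return 0;
}

/* Statistics counter: wrapping is harmless and allowed. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
    atomic_t ref = { INT_MAX };
    atomic_unchecked_t stat = { INT_MAX };

    printf("checked inc at INT_MAX -> %d\n", atomic_inc_checked(&ref));
    atomic_inc_unchecked(&stat);
    printf("unchecked counter now  -> %d\n", stat.counter);
    return 0;
}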
57383 diff -urNp linux-2.6.32.41/include/linux/uaccess.h linux-2.6.32.41/include/linux/uaccess.h
57384 --- linux-2.6.32.41/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57385 +++ linux-2.6.32.41/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57386 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57387 long ret; \
57388 mm_segment_t old_fs = get_fs(); \
57389 \
57390 - set_fs(KERNEL_DS); \
57391 pagefault_disable(); \
57392 + set_fs(KERNEL_DS); \
57393 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57394 - pagefault_enable(); \
57395 set_fs(old_fs); \
57396 + pagefault_enable(); \
57397 ret; \
57398 })
57399
57400 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57401 * Safely read from address @src to the buffer at @dst. If a kernel fault
57402 * happens, handle that and return -EFAULT.
57403 */
57404 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57405 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57406
57407 /*
57408 * probe_kernel_write(): safely attempt to write to a location
57409 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57410 * Safely write to address @dst from the buffer at @src. If a kernel fault
57411 * happens, handle that and return -EFAULT.
57412 */
57413 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57414 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57415
57416 #endif /* __LINUX_UACCESS_H__ */
57417 diff -urNp linux-2.6.32.41/include/linux/unaligned/access_ok.h linux-2.6.32.41/include/linux/unaligned/access_ok.h
57418 --- linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57419 +++ linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57420 @@ -6,32 +6,32 @@
57421
57422 static inline u16 get_unaligned_le16(const void *p)
57423 {
57424 - return le16_to_cpup((__le16 *)p);
57425 + return le16_to_cpup((const __le16 *)p);
57426 }
57427
57428 static inline u32 get_unaligned_le32(const void *p)
57429 {
57430 - return le32_to_cpup((__le32 *)p);
57431 + return le32_to_cpup((const __le32 *)p);
57432 }
57433
57434 static inline u64 get_unaligned_le64(const void *p)
57435 {
57436 - return le64_to_cpup((__le64 *)p);
57437 + return le64_to_cpup((const __le64 *)p);
57438 }
57439
57440 static inline u16 get_unaligned_be16(const void *p)
57441 {
57442 - return be16_to_cpup((__be16 *)p);
57443 + return be16_to_cpup((const __be16 *)p);
57444 }
57445
57446 static inline u32 get_unaligned_be32(const void *p)
57447 {
57448 - return be32_to_cpup((__be32 *)p);
57449 + return be32_to_cpup((const __be32 *)p);
57450 }
57451
57452 static inline u64 get_unaligned_be64(const void *p)
57453 {
57454 - return be64_to_cpup((__be64 *)p);
57455 + return be64_to_cpup((const __be64 *)p);
57456 }
57457
57458 static inline void put_unaligned_le16(u16 val, void *p)
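access_ok.h is the variant of the unaligned helpers used on architectures that tolerate unaligned loads, which is why a cast-and-dereference is sufficient and the hunk only needs to make the casts const-correct. A portable userspace equivalent has to avoid that alignment assumption; get_unaligned_le32() below is a memcpy-based rendering, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_le32(const void *p)
{
    uint32_t v;

    memcpy(&v, p, sizeof(v));           /* unaligned-safe on any architecture */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    v = __builtin_bswap32(v);           /* the buffer is little-endian */
#endif
    return v;
}

int main(void)
{
    unsigned char buf[] = { 0xff, 0x78, 0x56, 0x34, 0x12 };

    /* Read starts at buf+1, a deliberately misaligned offset. */
    printf("0x%08x\n", get_unaligned_le32(buf + 1));    /* 0x12345678 */
    return 0;
}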
57459 diff -urNp linux-2.6.32.41/include/linux/vmalloc.h linux-2.6.32.41/include/linux/vmalloc.h
57460 --- linux-2.6.32.41/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57461 +++ linux-2.6.32.41/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57462 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57463 #define VM_MAP 0x00000004 /* vmap()ed pages */
57464 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57465 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57466 +
57467 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57468 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57469 +#endif
57470 +
57471 /* bits [20..32] reserved for arch specific ioremap internals */
57472
57473 /*
57474 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57475
57476 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57477
57478 +#define vmalloc(x) \
57479 +({ \
57480 + void *___retval; \
57481 + intoverflow_t ___x = (intoverflow_t)x; \
57482 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57483 + ___retval = NULL; \
57484 + else \
57485 + ___retval = vmalloc((unsigned long)___x); \
57486 + ___retval; \
57487 +})
57488 +
57489 +#define __vmalloc(x, y, z) \
57490 +({ \
57491 + void *___retval; \
57492 + intoverflow_t ___x = (intoverflow_t)x; \
57493 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57494 + ___retval = NULL; \
57495 + else \
57496 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57497 + ___retval; \
57498 +})
57499 +
57500 +#define vmalloc_user(x) \
57501 +({ \
57502 + void *___retval; \
57503 + intoverflow_t ___x = (intoverflow_t)x; \
57504 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57505 + ___retval = NULL; \
57506 + else \
57507 + ___retval = vmalloc_user((unsigned long)___x); \
57508 + ___retval; \
57509 +})
57510 +
57511 +#define vmalloc_exec(x) \
57512 +({ \
57513 + void *___retval; \
57514 + intoverflow_t ___x = (intoverflow_t)x; \
57515 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57516 + ___retval = NULL; \
57517 + else \
57518 + ___retval = vmalloc_exec((unsigned long)___x); \
57519 + ___retval; \
57520 +})
57521 +
57522 +#define vmalloc_node(x, y) \
57523 +({ \
57524 + void *___retval; \
57525 + intoverflow_t ___x = (intoverflow_t)x; \
57526 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57527 + ___retval = NULL; \
57528 + else \
57529 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57530 + ___retval; \
57531 +})
57532 +
57533 +#define vmalloc_32(x) \
57534 +({ \
57535 + void *___retval; \
57536 + intoverflow_t ___x = (intoverflow_t)x; \
57537 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57538 + ___retval = NULL; \
57539 + else \
57540 + ___retval = vmalloc_32((unsigned long)___x); \
57541 + ___retval; \
57542 +})
57543 +
57544 +#define vmalloc_32_user(x) \
57545 +({ \
57546 + void *___retval; \
57547 + intoverflow_t ___x = (intoverflow_t)x; \
57548 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57549 + ___retval = NULL; \
57550 + else \
57551 + ___retval = vmalloc_32_user((unsigned long)___x);\
57552 + ___retval; \
57553 +})
57554 +
57555 #endif /* _LINUX_VMALLOC_H */
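Every wrapper in this vmalloc.h hunk follows the same shape as the kmalloc ones earlier: the requested size is taken into intoverflow_t (defined elsewhere in this patch; assumed here to be a 64-bit signed type), range-checked against ULONG_MAX, and only then narrowed to the allocator's unsigned long argument, with a WARN() and a NULL return if the value would not survive the narrowing. A userspace rendering of that guard; vmalloc_checked(), the malloc() stand-in and the 16 GiB example are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef int64_t intoverflow_t;

static void *vmalloc_checked(intoverflow_t size)
{
    if (size < 0 || (uint64_t)size > SIZE_MAX) {
        fprintf(stderr, "vmalloc size overflow\n");   /* mirrors the WARN() */
        return NULL;
    }
    return malloc((size_t)size);                      /* stand-in for vmalloc() */
}

int main(void)
{
    /* 2^20 entries of 2^14 bytes = 16 GiB, computed in the wide type so the
     * product cannot wrap before the check (it would wrap in 32-bit size_t). */
    intoverflow_t want = (intoverflow_t)(1 << 20) * (1 << 14);

    void *p = vmalloc_checked(want);
    printf("16 GiB request -> %p\n", p);   /* NULL on 32-bit builds */
    free(p);
    return 0;
}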
57556 diff -urNp linux-2.6.32.41/include/linux/vmstat.h linux-2.6.32.41/include/linux/vmstat.h
57557 --- linux-2.6.32.41/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57558 +++ linux-2.6.32.41/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57559 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57560 /*
57561 * Zone based page accounting with per cpu differentials.
57562 */
57563 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57564 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57565
57566 static inline void zone_page_state_add(long x, struct zone *zone,
57567 enum zone_stat_item item)
57568 {
57569 - atomic_long_add(x, &zone->vm_stat[item]);
57570 - atomic_long_add(x, &vm_stat[item]);
57571 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57572 + atomic_long_add_unchecked(x, &vm_stat[item]);
57573 }
57574
57575 static inline unsigned long global_page_state(enum zone_stat_item item)
57576 {
57577 - long x = atomic_long_read(&vm_stat[item]);
57578 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57579 #ifdef CONFIG_SMP
57580 if (x < 0)
57581 x = 0;
57582 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57583 static inline unsigned long zone_page_state(struct zone *zone,
57584 enum zone_stat_item item)
57585 {
57586 - long x = atomic_long_read(&zone->vm_stat[item]);
57587 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57588 #ifdef CONFIG_SMP
57589 if (x < 0)
57590 x = 0;
57591 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57592 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57593 enum zone_stat_item item)
57594 {
57595 - long x = atomic_long_read(&zone->vm_stat[item]);
57596 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57597
57598 #ifdef CONFIG_SMP
57599 int cpu;
57600 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57601
57602 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57603 {
57604 - atomic_long_inc(&zone->vm_stat[item]);
57605 - atomic_long_inc(&vm_stat[item]);
57606 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57607 + atomic_long_inc_unchecked(&vm_stat[item]);
57608 }
57609
57610 static inline void __inc_zone_page_state(struct page *page,
57611 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57612
57613 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57614 {
57615 - atomic_long_dec(&zone->vm_stat[item]);
57616 - atomic_long_dec(&vm_stat[item]);
57617 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57618 + atomic_long_dec_unchecked(&vm_stat[item]);
57619 }
57620
57621 static inline void __dec_zone_page_state(struct page *page,
57622 diff -urNp linux-2.6.32.41/include/media/v4l2-device.h linux-2.6.32.41/include/media/v4l2-device.h
57623 --- linux-2.6.32.41/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57624 +++ linux-2.6.32.41/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57625 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57626 this function returns 0. If the name ends with a digit (e.g. cx18),
57627 then the name will be set to cx18-0 since cx180 looks really odd. */
57628 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57629 - atomic_t *instance);
57630 + atomic_unchecked_t *instance);
57631
57632 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57633 Since the parent disappears this ensures that v4l2_dev doesn't have an
57634 diff -urNp linux-2.6.32.41/include/net/flow.h linux-2.6.32.41/include/net/flow.h
57635 --- linux-2.6.32.41/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57636 +++ linux-2.6.32.41/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57637 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57638 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57639 u8 dir, flow_resolve_t resolver);
57640 extern void flow_cache_flush(void);
57641 -extern atomic_t flow_cache_genid;
57642 +extern atomic_unchecked_t flow_cache_genid;
57643
57644 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57645 {
57646 diff -urNp linux-2.6.32.41/include/net/inetpeer.h linux-2.6.32.41/include/net/inetpeer.h
57647 --- linux-2.6.32.41/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57648 +++ linux-2.6.32.41/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57649 @@ -24,7 +24,7 @@ struct inet_peer
57650 __u32 dtime; /* the time of last use of not
57651 * referenced entries */
57652 atomic_t refcnt;
57653 - atomic_t rid; /* Frag reception counter */
57654 + atomic_unchecked_t rid; /* Frag reception counter */
57655 __u32 tcp_ts;
57656 unsigned long tcp_ts_stamp;
57657 };
57658 diff -urNp linux-2.6.32.41/include/net/ip_vs.h linux-2.6.32.41/include/net/ip_vs.h
57659 --- linux-2.6.32.41/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57660 +++ linux-2.6.32.41/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57661 @@ -365,7 +365,7 @@ struct ip_vs_conn {
57662 struct ip_vs_conn *control; /* Master control connection */
57663 atomic_t n_control; /* Number of controlled ones */
57664 struct ip_vs_dest *dest; /* real server */
57665 - atomic_t in_pkts; /* incoming packet counter */
57666 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57667
57668 /* packet transmitter for different forwarding methods. If it
57669 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57670 @@ -466,7 +466,7 @@ struct ip_vs_dest {
57671 union nf_inet_addr addr; /* IP address of the server */
57672 __be16 port; /* port number of the server */
57673 volatile unsigned flags; /* dest status flags */
57674 - atomic_t conn_flags; /* flags to copy to conn */
57675 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57676 atomic_t weight; /* server weight */
57677
57678 atomic_t refcnt; /* reference counter */
57679 diff -urNp linux-2.6.32.41/include/net/irda/ircomm_tty.h linux-2.6.32.41/include/net/irda/ircomm_tty.h
57680 --- linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57681 +++ linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57682 @@ -35,6 +35,7 @@
57683 #include <linux/termios.h>
57684 #include <linux/timer.h>
57685 #include <linux/tty.h> /* struct tty_struct */
57686 +#include <asm/local.h>
57687
57688 #include <net/irda/irias_object.h>
57689 #include <net/irda/ircomm_core.h>
57690 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57691 unsigned short close_delay;
57692 unsigned short closing_wait; /* time to wait before closing */
57693
57694 - int open_count;
57695 - int blocked_open; /* # of blocked opens */
57696 + local_t open_count;
57697 + local_t blocked_open; /* # of blocked opens */
57698
57699 /* Protect concurent access to :
57700 * o self->open_count
57701 diff -urNp linux-2.6.32.41/include/net/iucv/af_iucv.h linux-2.6.32.41/include/net/iucv/af_iucv.h
57702 --- linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57703 +++ linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57704 @@ -87,7 +87,7 @@ struct iucv_sock {
57705 struct iucv_sock_list {
57706 struct hlist_head head;
57707 rwlock_t lock;
57708 - atomic_t autobind_name;
57709 + atomic_unchecked_t autobind_name;
57710 };
57711
57712 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57713 diff -urNp linux-2.6.32.41/include/net/neighbour.h linux-2.6.32.41/include/net/neighbour.h
57714 --- linux-2.6.32.41/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57715 +++ linux-2.6.32.41/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57716 @@ -125,12 +125,12 @@ struct neighbour
57717 struct neigh_ops
57718 {
57719 int family;
57720 - void (*solicit)(struct neighbour *, struct sk_buff*);
57721 - void (*error_report)(struct neighbour *, struct sk_buff*);
57722 - int (*output)(struct sk_buff*);
57723 - int (*connected_output)(struct sk_buff*);
57724 - int (*hh_output)(struct sk_buff*);
57725 - int (*queue_xmit)(struct sk_buff*);
57726 + void (* const solicit)(struct neighbour *, struct sk_buff*);
57727 + void (* const error_report)(struct neighbour *, struct sk_buff*);
57728 + int (* const output)(struct sk_buff*);
57729 + int (* const connected_output)(struct sk_buff*);
57730 + int (* const hh_output)(struct sk_buff*);
57731 + int (* const queue_xmit)(struct sk_buff*);
57732 };
57733
57734 struct pneigh_entry
57735 diff -urNp linux-2.6.32.41/include/net/netlink.h linux-2.6.32.41/include/net/netlink.h
57736 --- linux-2.6.32.41/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57737 +++ linux-2.6.32.41/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57738 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57739 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57740 {
57741 if (mark)
57742 - skb_trim(skb, (unsigned char *) mark - skb->data);
57743 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57744 }
57745
57746 /**
57747 diff -urNp linux-2.6.32.41/include/net/netns/ipv4.h linux-2.6.32.41/include/net/netns/ipv4.h
57748 --- linux-2.6.32.41/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57749 +++ linux-2.6.32.41/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57750 @@ -54,7 +54,7 @@ struct netns_ipv4 {
57751 int current_rt_cache_rebuild_count;
57752
57753 struct timer_list rt_secret_timer;
57754 - atomic_t rt_genid;
57755 + atomic_unchecked_t rt_genid;
57756
57757 #ifdef CONFIG_IP_MROUTE
57758 struct sock *mroute_sk;
57759 diff -urNp linux-2.6.32.41/include/net/sctp/sctp.h linux-2.6.32.41/include/net/sctp/sctp.h
57760 --- linux-2.6.32.41/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57761 +++ linux-2.6.32.41/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57762 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57763
57764 #else /* SCTP_DEBUG */
57765
57766 -#define SCTP_DEBUG_PRINTK(whatever...)
57767 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57768 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57769 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57770 #define SCTP_ENABLE_DEBUG
57771 #define SCTP_DISABLE_DEBUG
57772 #define SCTP_ASSERT(expr, str, func)
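When SCTP_DEBUG is off these macros used to expand to nothing at all; the hunk gives them the usual do {} while (0) body so they still behave as a single statement. The difference matters: with an empty expansion a forgotten semicolon can silently splice the following statement into an if-body, while the do/while form turns the same typo into a compile error and avoids empty-body warnings. DBG_EMPTY/DBG_SAFE below are illustrative stand-ins:

#include <stdio.h>

#define DBG_EMPTY(msg)                      /* old style: expands to nothing */
#define DBG_SAFE(msg)   do {} while (0)     /* new style: a real statement   */

int main(void)
{
    int hits = 0;

    if (0)
        DBG_EMPTY("never printed")  /* missing ';' goes unnoticed...         */
        hits++;                     /* ...and silently becomes the if-body   */

    /* The same typo with DBG_SAFE() fails to build:
     *     if (0)
     *         DBG_SAFE("never printed")
     *         hits++;
     * gives "expected ';' before 'hits'".
     */
    DBG_SAFE("compiles away cleanly");

    printf("hits = %d, expected 1\n", hits);    /* prints 0 */
    return 0;
}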
57773 diff -urNp linux-2.6.32.41/include/net/sock.h linux-2.6.32.41/include/net/sock.h
57774 --- linux-2.6.32.41/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57775 +++ linux-2.6.32.41/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57776 @@ -272,7 +272,7 @@ struct sock {
57777 rwlock_t sk_callback_lock;
57778 int sk_err,
57779 sk_err_soft;
57780 - atomic_t sk_drops;
57781 + atomic_unchecked_t sk_drops;
57782 unsigned short sk_ack_backlog;
57783 unsigned short sk_max_ack_backlog;
57784 __u32 sk_priority;
57785 diff -urNp linux-2.6.32.41/include/net/tcp.h linux-2.6.32.41/include/net/tcp.h
57786 --- linux-2.6.32.41/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
57787 +++ linux-2.6.32.41/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
57788 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
57789 struct tcp_seq_afinfo {
57790 char *name;
57791 sa_family_t family;
57792 + /* cannot be const */
57793 struct file_operations seq_fops;
57794 struct seq_operations seq_ops;
57795 };
57796 diff -urNp linux-2.6.32.41/include/net/udp.h linux-2.6.32.41/include/net/udp.h
57797 --- linux-2.6.32.41/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
57798 +++ linux-2.6.32.41/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
57799 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
57800 char *name;
57801 sa_family_t family;
57802 struct udp_table *udp_table;
57803 + /* cannot be const */
57804 struct file_operations seq_fops;
57805 struct seq_operations seq_ops;
57806 };
57807 diff -urNp linux-2.6.32.41/include/scsi/scsi_device.h linux-2.6.32.41/include/scsi/scsi_device.h
57808 --- linux-2.6.32.41/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
57809 +++ linux-2.6.32.41/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
57810 @@ -156,9 +156,9 @@ struct scsi_device {
57811 unsigned int max_device_blocked; /* what device_blocked counts down from */
57812 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57813
57814 - atomic_t iorequest_cnt;
57815 - atomic_t iodone_cnt;
57816 - atomic_t ioerr_cnt;
57817 + atomic_unchecked_t iorequest_cnt;
57818 + atomic_unchecked_t iodone_cnt;
57819 + atomic_unchecked_t ioerr_cnt;
57820
57821 struct device sdev_gendev,
57822 sdev_dev;
57823 diff -urNp linux-2.6.32.41/include/sound/ac97_codec.h linux-2.6.32.41/include/sound/ac97_codec.h
57824 --- linux-2.6.32.41/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
57825 +++ linux-2.6.32.41/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
57826 @@ -419,15 +419,15 @@
57827 struct snd_ac97;
57828
57829 struct snd_ac97_build_ops {
57830 - int (*build_3d) (struct snd_ac97 *ac97);
57831 - int (*build_specific) (struct snd_ac97 *ac97);
57832 - int (*build_spdif) (struct snd_ac97 *ac97);
57833 - int (*build_post_spdif) (struct snd_ac97 *ac97);
57834 + int (* const build_3d) (struct snd_ac97 *ac97);
57835 + int (* const build_specific) (struct snd_ac97 *ac97);
57836 + int (* const build_spdif) (struct snd_ac97 *ac97);
57837 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
57838 #ifdef CONFIG_PM
57839 - void (*suspend) (struct snd_ac97 *ac97);
57840 - void (*resume) (struct snd_ac97 *ac97);
57841 + void (* const suspend) (struct snd_ac97 *ac97);
57842 + void (* const resume) (struct snd_ac97 *ac97);
57843 #endif
57844 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57845 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57846 };
57847
57848 struct snd_ac97_bus_ops {
57849 @@ -477,7 +477,7 @@ struct snd_ac97_template {
57850
57851 struct snd_ac97 {
57852 /* -- lowlevel (hardware) driver specific -- */
57853 - struct snd_ac97_build_ops * build_ops;
57854 + const struct snd_ac97_build_ops * build_ops;
57855 void *private_data;
57856 void (*private_free) (struct snd_ac97 *ac97);
57857 /* --- */
57858 diff -urNp linux-2.6.32.41/include/sound/ymfpci.h linux-2.6.32.41/include/sound/ymfpci.h
57859 --- linux-2.6.32.41/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
57860 +++ linux-2.6.32.41/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
57861 @@ -358,7 +358,7 @@ struct snd_ymfpci {
57862 spinlock_t reg_lock;
57863 spinlock_t voice_lock;
57864 wait_queue_head_t interrupt_sleep;
57865 - atomic_t interrupt_sleep_count;
57866 + atomic_unchecked_t interrupt_sleep_count;
57867 struct snd_info_entry *proc_entry;
57868 const struct firmware *dsp_microcode;
57869 const struct firmware *controller_microcode;
57870 diff -urNp linux-2.6.32.41/include/trace/events/irq.h linux-2.6.32.41/include/trace/events/irq.h
57871 --- linux-2.6.32.41/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
57872 +++ linux-2.6.32.41/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
57873 @@ -34,7 +34,7 @@
57874 */
57875 TRACE_EVENT(irq_handler_entry,
57876
57877 - TP_PROTO(int irq, struct irqaction *action),
57878 + TP_PROTO(int irq, const struct irqaction *action),
57879
57880 TP_ARGS(irq, action),
57881
57882 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
57883 */
57884 TRACE_EVENT(irq_handler_exit,
57885
57886 - TP_PROTO(int irq, struct irqaction *action, int ret),
57887 + TP_PROTO(int irq, const struct irqaction *action, int ret),
57888
57889 TP_ARGS(irq, action, ret),
57890
57891 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
57892 */
57893 TRACE_EVENT(softirq_entry,
57894
57895 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57896 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57897
57898 TP_ARGS(h, vec),
57899
57900 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
57901 */
57902 TRACE_EVENT(softirq_exit,
57903
57904 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57905 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57906
57907 TP_ARGS(h, vec),
57908
57909 diff -urNp linux-2.6.32.41/include/video/uvesafb.h linux-2.6.32.41/include/video/uvesafb.h
57910 --- linux-2.6.32.41/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
57911 +++ linux-2.6.32.41/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
57912 @@ -177,6 +177,7 @@ struct uvesafb_par {
57913 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
57914 u8 pmi_setpal; /* PMI for palette changes */
57915 u16 *pmi_base; /* protected mode interface location */
57916 + u8 *pmi_code; /* protected mode code location */
57917 void *pmi_start;
57918 void *pmi_pal;
57919 u8 *vbe_state_orig; /*
57920 diff -urNp linux-2.6.32.41/init/do_mounts.c linux-2.6.32.41/init/do_mounts.c
57921 --- linux-2.6.32.41/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
57922 +++ linux-2.6.32.41/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
57923 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
57924
57925 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
57926 {
57927 - int err = sys_mount(name, "/root", fs, flags, data);
57928 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
57929 if (err)
57930 return err;
57931
57932 - sys_chdir("/root");
57933 + sys_chdir((__force const char __user *)"/root");
57934 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
57935 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
57936 current->fs->pwd.mnt->mnt_sb->s_type->name,
57937 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
57938 va_start(args, fmt);
57939 vsprintf(buf, fmt, args);
57940 va_end(args);
57941 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
57942 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
57943 if (fd >= 0) {
57944 sys_ioctl(fd, FDEJECT, 0);
57945 sys_close(fd);
57946 }
57947 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
57948 - fd = sys_open("/dev/console", O_RDWR, 0);
57949 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
57950 if (fd >= 0) {
57951 sys_ioctl(fd, TCGETS, (long)&termios);
57952 termios.c_lflag &= ~ICANON;
57953 sys_ioctl(fd, TCSETSF, (long)&termios);
57954 - sys_read(fd, &c, 1);
57955 + sys_read(fd, (char __user *)&c, 1);
57956 termios.c_lflag |= ICANON;
57957 sys_ioctl(fd, TCSETSF, (long)&termios);
57958 sys_close(fd);
57959 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
57960 mount_root();
57961 out:
57962 devtmpfs_mount("dev");
57963 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
57964 - sys_chroot(".");
57965 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
57966 + sys_chroot((__force char __user *)".");
57967 }
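The casts added throughout init/ exist purely for sparse: __user places a pointer in a separate address space so it cannot be mixed with kernel pointers or dereferenced directly, and __force marks the crossings that are intentional, as they are here where early boot hands kernel strings straight to sys_*() entry points. Under a normal compile the annotations vanish. The sketch below wires the __CHECKER__ definitions from compiler.h into a userspace stand-in; demo_sys_chdir() is a made-up name:

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Stand-in for a syscall entry point: declared to take a user pointer. */
static long demo_sys_chdir(const char __user *path)
{
    /* A real syscall would copy_from_user() here. */
    return path ? (long)strlen((__force const char *)path) : -1;
}

int main(void)
{
    /* Kernel-side string passed where a __user pointer is expected: sparse
     * would warn without the __force cast; plain gcc compiles it either way. */
    long n = demo_sys_chdir((__force const char __user *)"/root");

    printf("demo_sys_chdir consumed %ld bytes of path\n", n);
    return 0;
}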
57968 diff -urNp linux-2.6.32.41/init/do_mounts.h linux-2.6.32.41/init/do_mounts.h
57969 --- linux-2.6.32.41/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
57970 +++ linux-2.6.32.41/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
57971 @@ -15,15 +15,15 @@ extern int root_mountflags;
57972
57973 static inline int create_dev(char *name, dev_t dev)
57974 {
57975 - sys_unlink(name);
57976 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
57977 + sys_unlink((__force char __user *)name);
57978 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
57979 }
57980
57981 #if BITS_PER_LONG == 32
57982 static inline u32 bstat(char *name)
57983 {
57984 struct stat64 stat;
57985 - if (sys_stat64(name, &stat) != 0)
57986 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
57987 return 0;
57988 if (!S_ISBLK(stat.st_mode))
57989 return 0;
57990 diff -urNp linux-2.6.32.41/init/do_mounts_initrd.c linux-2.6.32.41/init/do_mounts_initrd.c
57991 --- linux-2.6.32.41/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
57992 +++ linux-2.6.32.41/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
57993 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
57994 sys_close(old_fd);sys_close(root_fd);
57995 sys_close(0);sys_close(1);sys_close(2);
57996 sys_setsid();
57997 - (void) sys_open("/dev/console",O_RDWR,0);
57998 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
57999 (void) sys_dup(0);
58000 (void) sys_dup(0);
58001 return kernel_execve(shell, argv, envp_init);
58002 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58003 create_dev("/dev/root.old", Root_RAM0);
58004 /* mount initrd on rootfs' /root */
58005 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58006 - sys_mkdir("/old", 0700);
58007 - root_fd = sys_open("/", 0, 0);
58008 - old_fd = sys_open("/old", 0, 0);
58009 + sys_mkdir((__force const char __user *)"/old", 0700);
58010 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58011 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58012 /* move initrd over / and chdir/chroot in initrd root */
58013 - sys_chdir("/root");
58014 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58015 - sys_chroot(".");
58016 + sys_chdir((__force const char __user *)"/root");
58017 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58018 + sys_chroot((__force const char __user *)".");
58019
58020 /*
58021 * In case that a resume from disk is carried out by linuxrc or one of
58022 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58023
58024 /* move initrd to rootfs' /old */
58025 sys_fchdir(old_fd);
58026 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58027 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58028 /* switch root and cwd back to / of rootfs */
58029 sys_fchdir(root_fd);
58030 - sys_chroot(".");
58031 + sys_chroot((__force const char __user *)".");
58032 sys_close(old_fd);
58033 sys_close(root_fd);
58034
58035 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58036 - sys_chdir("/old");
58037 + sys_chdir((__force const char __user *)"/old");
58038 return;
58039 }
58040
58041 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58042 mount_root();
58043
58044 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58045 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58046 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58047 if (!error)
58048 printk("okay\n");
58049 else {
58050 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58051 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58052 if (error == -ENOENT)
58053 printk("/initrd does not exist. Ignored.\n");
58054 else
58055 printk("failed\n");
58056 printk(KERN_NOTICE "Unmounting old root\n");
58057 - sys_umount("/old", MNT_DETACH);
58058 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58059 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58060 if (fd < 0) {
58061 error = fd;
58062 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58063 * mounted in the normal path.
58064 */
58065 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58066 - sys_unlink("/initrd.image");
58067 + sys_unlink((__force const char __user *)"/initrd.image");
58068 handle_initrd();
58069 return 1;
58070 }
58071 }
58072 - sys_unlink("/initrd.image");
58073 + sys_unlink((__force const char __user *)"/initrd.image");
58074 return 0;
58075 }
58076 diff -urNp linux-2.6.32.41/init/do_mounts_md.c linux-2.6.32.41/init/do_mounts_md.c
58077 --- linux-2.6.32.41/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58078 +++ linux-2.6.32.41/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58079 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58080 partitioned ? "_d" : "", minor,
58081 md_setup_args[ent].device_names);
58082
58083 - fd = sys_open(name, 0, 0);
58084 + fd = sys_open((__force char __user *)name, 0, 0);
58085 if (fd < 0) {
58086 printk(KERN_ERR "md: open failed - cannot start "
58087 "array %s\n", name);
58088 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58089 * array without it
58090 */
58091 sys_close(fd);
58092 - fd = sys_open(name, 0, 0);
58093 + fd = sys_open((__force char __user *)name, 0, 0);
58094 sys_ioctl(fd, BLKRRPART, 0);
58095 }
58096 sys_close(fd);
58097 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58098
58099 wait_for_device_probe();
58100
58101 - fd = sys_open("/dev/md0", 0, 0);
58102 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58103 if (fd >= 0) {
58104 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58105 sys_close(fd);
58106 diff -urNp linux-2.6.32.41/init/initramfs.c linux-2.6.32.41/init/initramfs.c
58107 --- linux-2.6.32.41/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58108 +++ linux-2.6.32.41/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58109 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58110 }
58111 }
58112
58113 -static long __init do_utime(char __user *filename, time_t mtime)
58114 +static long __init do_utime(__force char __user *filename, time_t mtime)
58115 {
58116 struct timespec t[2];
58117
58118 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58119 struct dir_entry *de, *tmp;
58120 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58121 list_del(&de->list);
58122 - do_utime(de->name, de->mtime);
58123 + do_utime((__force char __user *)de->name, de->mtime);
58124 kfree(de->name);
58125 kfree(de);
58126 }
58127 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58128 if (nlink >= 2) {
58129 char *old = find_link(major, minor, ino, mode, collected);
58130 if (old)
58131 - return (sys_link(old, collected) < 0) ? -1 : 1;
58132 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58133 }
58134 return 0;
58135 }
58136 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58137 {
58138 struct stat st;
58139
58140 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58141 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58142 if (S_ISDIR(st.st_mode))
58143 - sys_rmdir(path);
58144 + sys_rmdir((__force char __user *)path);
58145 else
58146 - sys_unlink(path);
58147 + sys_unlink((__force char __user *)path);
58148 }
58149 }
58150
58151 @@ -305,7 +305,7 @@ static int __init do_name(void)
58152 int openflags = O_WRONLY|O_CREAT;
58153 if (ml != 1)
58154 openflags |= O_TRUNC;
58155 - wfd = sys_open(collected, openflags, mode);
58156 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58157
58158 if (wfd >= 0) {
58159 sys_fchown(wfd, uid, gid);
58160 @@ -317,17 +317,17 @@ static int __init do_name(void)
58161 }
58162 }
58163 } else if (S_ISDIR(mode)) {
58164 - sys_mkdir(collected, mode);
58165 - sys_chown(collected, uid, gid);
58166 - sys_chmod(collected, mode);
58167 + sys_mkdir((__force char __user *)collected, mode);
58168 + sys_chown((__force char __user *)collected, uid, gid);
58169 + sys_chmod((__force char __user *)collected, mode);
58170 dir_add(collected, mtime);
58171 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58172 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58173 if (maybe_link() == 0) {
58174 - sys_mknod(collected, mode, rdev);
58175 - sys_chown(collected, uid, gid);
58176 - sys_chmod(collected, mode);
58177 - do_utime(collected, mtime);
58178 + sys_mknod((__force char __user *)collected, mode, rdev);
58179 + sys_chown((__force char __user *)collected, uid, gid);
58180 + sys_chmod((__force char __user *)collected, mode);
58181 + do_utime((__force char __user *)collected, mtime);
58182 }
58183 }
58184 return 0;
58185 @@ -336,15 +336,15 @@ static int __init do_name(void)
58186 static int __init do_copy(void)
58187 {
58188 if (count >= body_len) {
58189 - sys_write(wfd, victim, body_len);
58190 + sys_write(wfd, (__force char __user *)victim, body_len);
58191 sys_close(wfd);
58192 - do_utime(vcollected, mtime);
58193 + do_utime((__force char __user *)vcollected, mtime);
58194 kfree(vcollected);
58195 eat(body_len);
58196 state = SkipIt;
58197 return 0;
58198 } else {
58199 - sys_write(wfd, victim, count);
58200 + sys_write(wfd, (__force char __user *)victim, count);
58201 body_len -= count;
58202 eat(count);
58203 return 1;
58204 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58205 {
58206 collected[N_ALIGN(name_len) + body_len] = '\0';
58207 clean_path(collected, 0);
58208 - sys_symlink(collected + N_ALIGN(name_len), collected);
58209 - sys_lchown(collected, uid, gid);
58210 - do_utime(collected, mtime);
58211 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58212 + sys_lchown((__force char __user *)collected, uid, gid);
58213 + do_utime((__force char __user *)collected, mtime);
58214 state = SkipIt;
58215 next_state = Reset;
58216 return 0;
58217 diff -urNp linux-2.6.32.41/init/Kconfig linux-2.6.32.41/init/Kconfig
58218 --- linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58219 +++ linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58220 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58221
58222 config COMPAT_BRK
58223 bool "Disable heap randomization"
58224 - default y
58225 + default n
58226 help
58227 Randomizing heap placement makes heap exploits harder, but it
58228 also breaks ancient binaries (including anything libc5 based).
58229 diff -urNp linux-2.6.32.41/init/main.c linux-2.6.32.41/init/main.c
58230 --- linux-2.6.32.41/init/main.c 2011-05-10 22:12:01.000000000 -0400
58231 +++ linux-2.6.32.41/init/main.c 2011-05-22 23:02:06.000000000 -0400
58232 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58233 #ifdef CONFIG_TC
58234 extern void tc_init(void);
58235 #endif
58236 +extern void grsecurity_init(void);
58237
58238 enum system_states system_state __read_mostly;
58239 EXPORT_SYMBOL(system_state);
58240 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58241
58242 __setup("reset_devices", set_reset_devices);
58243
58244 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58245 +extern char pax_enter_kernel_user[];
58246 +extern char pax_exit_kernel_user[];
58247 +extern pgdval_t clone_pgd_mask;
58248 +#endif
58249 +
58250 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58251 +static int __init setup_pax_nouderef(char *str)
58252 +{
58253 +#ifdef CONFIG_X86_32
58254 + unsigned int cpu;
58255 + struct desc_struct *gdt;
58256 +
58257 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58258 + gdt = get_cpu_gdt_table(cpu);
58259 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58260 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58261 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58262 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58263 + }
58264 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58265 +#else
58266 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58267 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58268 + clone_pgd_mask = ~(pgdval_t)0UL;
58269 +#endif
58270 +
58271 + return 0;
58272 +}
58273 +early_param("pax_nouderef", setup_pax_nouderef);
58274 +#endif
58275 +
58276 +#ifdef CONFIG_PAX_SOFTMODE
58277 +unsigned int pax_softmode;
58278 +
58279 +static int __init setup_pax_softmode(char *str)
58280 +{
58281 + get_option(&str, &pax_softmode);
58282 + return 1;
58283 +}
58284 +__setup("pax_softmode=", setup_pax_softmode);
58285 +#endif
58286 +
58287 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58288 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58289 static const char *panic_later, *panic_param;
58290 @@ -705,52 +749,53 @@ int initcall_debug;
58291 core_param(initcall_debug, initcall_debug, bool, 0644);
58292
58293 static char msgbuf[64];
58294 -static struct boot_trace_call call;
58295 -static struct boot_trace_ret ret;
58296 +static struct boot_trace_call trace_call;
58297 +static struct boot_trace_ret trace_ret;
58298
58299 int do_one_initcall(initcall_t fn)
58300 {
58301 int count = preempt_count();
58302 ktime_t calltime, delta, rettime;
58303 + const char *msg1 = "", *msg2 = "";
58304
58305 if (initcall_debug) {
58306 - call.caller = task_pid_nr(current);
58307 - printk("calling %pF @ %i\n", fn, call.caller);
58308 + trace_call.caller = task_pid_nr(current);
58309 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58310 calltime = ktime_get();
58311 - trace_boot_call(&call, fn);
58312 + trace_boot_call(&trace_call, fn);
58313 enable_boot_trace();
58314 }
58315
58316 - ret.result = fn();
58317 + trace_ret.result = fn();
58318
58319 if (initcall_debug) {
58320 disable_boot_trace();
58321 rettime = ktime_get();
58322 delta = ktime_sub(rettime, calltime);
58323 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58324 - trace_boot_ret(&ret, fn);
58325 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58326 + trace_boot_ret(&trace_ret, fn);
58327 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58328 - ret.result, ret.duration);
58329 + trace_ret.result, trace_ret.duration);
58330 }
58331
58332 msgbuf[0] = 0;
58333
58334 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58335 - sprintf(msgbuf, "error code %d ", ret.result);
58336 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58337 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58338
58339 if (preempt_count() != count) {
58340 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58341 + msg1 = " preemption imbalance";
58342 preempt_count() = count;
58343 }
58344 if (irqs_disabled()) {
58345 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58346 + msg2 = " disabled interrupts";
58347 local_irq_enable();
58348 }
58349 - if (msgbuf[0]) {
58350 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58351 + if (msgbuf[0] || *msg1 || *msg2) {
58352 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58353 }
58354
58355 - return ret.result;
58356 + return trace_ret.result;
58357 }
58358
58359
58360 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58361 if (!ramdisk_execute_command)
58362 ramdisk_execute_command = "/init";
58363
58364 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58365 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58366 ramdisk_execute_command = NULL;
58367 prepare_namespace();
58368 }
58369
58370 + grsecurity_init();
58371 +
58372 /*
58373 * Ok, we have completed the initial bootup, and
58374 * we're essentially up and running. Get rid of the
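setup_pax_softmode() above is a standard __setup() hook: the kernel's early argument parser hands it the text that follows "pax_softmode=" on the command line, get_option() extracts the integer, and returning 1 tells the parser the option was consumed. The userspace analogue below mimics only that parsing step; the command-line scanning and strtoul() call stand in for parse_args()/get_option() and are not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int pax_softmode;

/* What the __setup("pax_softmode=", ...) handler boils down to. */
static int setup_pax_softmode(const char *str)
{
    pax_softmode = (unsigned int)strtoul(str, NULL, 0);
    return 1;                       /* "handled", as the kernel hook returns */
}

int main(void)
{
    const char *cmdline = "root=/dev/sda1 pax_softmode=1 quiet";
    const char *opt = strstr(cmdline, "pax_softmode=");

    if (opt)
        setup_pax_softmode(opt + strlen("pax_softmode="));

    printf("pax_softmode = %u\n", pax_softmode);    /* 1 */
    return 0;
}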
58375 diff -urNp linux-2.6.32.41/init/noinitramfs.c linux-2.6.32.41/init/noinitramfs.c
58376 --- linux-2.6.32.41/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58377 +++ linux-2.6.32.41/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58378 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58379 {
58380 int err;
58381
58382 - err = sys_mkdir("/dev", 0755);
58383 + err = sys_mkdir((const char __user *)"/dev", 0755);
58384 if (err < 0)
58385 goto out;
58386
58387 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58388 if (err < 0)
58389 goto out;
58390
58391 - err = sys_mkdir("/root", 0700);
58392 + err = sys_mkdir((const char __user *)"/root", 0700);
58393 if (err < 0)
58394 goto out;
58395
58396 diff -urNp linux-2.6.32.41/ipc/mqueue.c linux-2.6.32.41/ipc/mqueue.c
58397 --- linux-2.6.32.41/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58398 +++ linux-2.6.32.41/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58399 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58400 mq_bytes = (mq_msg_tblsz +
58401 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58402
58403 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58404 spin_lock(&mq_lock);
58405 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58406 u->mq_bytes + mq_bytes >
58407 diff -urNp linux-2.6.32.41/ipc/sem.c linux-2.6.32.41/ipc/sem.c
58408 --- linux-2.6.32.41/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58409 +++ linux-2.6.32.41/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58410 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58411 ushort* sem_io = fast_sem_io;
58412 int nsems;
58413
58414 + pax_track_stack();
58415 +
58416 sma = sem_lock_check(ns, semid);
58417 if (IS_ERR(sma))
58418 return PTR_ERR(sma);
58419 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58420 unsigned long jiffies_left = 0;
58421 struct ipc_namespace *ns;
58422
58423 + pax_track_stack();
58424 +
58425 ns = current->nsproxy->ipc_ns;
58426
58427 if (nsops < 1 || semid < 0)
58428 diff -urNp linux-2.6.32.41/ipc/shm.c linux-2.6.32.41/ipc/shm.c
58429 --- linux-2.6.32.41/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58430 +++ linux-2.6.32.41/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58431 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58432 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58433 #endif
58434
58435 +#ifdef CONFIG_GRKERNSEC
58436 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58437 + const time_t shm_createtime, const uid_t cuid,
58438 + const int shmid);
58439 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58440 + const time_t shm_createtime);
58441 +#endif
58442 +
58443 void shm_init_ns(struct ipc_namespace *ns)
58444 {
58445 ns->shm_ctlmax = SHMMAX;
58446 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58447 shp->shm_lprid = 0;
58448 shp->shm_atim = shp->shm_dtim = 0;
58449 shp->shm_ctim = get_seconds();
58450 +#ifdef CONFIG_GRKERNSEC
58451 + {
58452 + struct timespec timeval;
58453 + do_posix_clock_monotonic_gettime(&timeval);
58454 +
58455 + shp->shm_createtime = timeval.tv_sec;
58456 + }
58457 +#endif
58458 shp->shm_segsz = size;
58459 shp->shm_nattch = 0;
58460 shp->shm_file = file;
58461 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58462 if (err)
58463 goto out_unlock;
58464
58465 +#ifdef CONFIG_GRKERNSEC
58466 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58467 + shp->shm_perm.cuid, shmid) ||
58468 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58469 + err = -EACCES;
58470 + goto out_unlock;
58471 + }
58472 +#endif
58473 +
58474 path.dentry = dget(shp->shm_file->f_path.dentry);
58475 path.mnt = shp->shm_file->f_path.mnt;
58476 shp->shm_nattch++;
58477 +#ifdef CONFIG_GRKERNSEC
58478 + shp->shm_lapid = current->pid;
58479 +#endif
58480 size = i_size_read(path.dentry->d_inode);
58481 shm_unlock(shp);
58482
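Note on the ipc/shm.c hunks above: they record the segment's creation time and last-attach pid and route do_shmat() through two grsecurity hooks whose zero return is mapped to -EACCES. The hooks themselves are defined in the grsecurity core elsewhere in the patch; the sketch below only illustrates the calling convention, with a made-up stand-in policy, not grsecurity's actual rules.

/* Hypothetical sketch of the hook pattern: 0 means "denied",
 * non-zero means "allowed", mirroring the convention in the hunk. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct shm_policy_info {
	int  creator_pid;
	long create_time;
	bool creator_alive;
};

static int example_handle_shmat(const struct shm_policy_info *info)
{
	return info->creator_alive ? 1 : 0;   /* illustrative policy only */
}

static long do_attach(const struct shm_policy_info *info)
{
	if (!example_handle_shmat(info))
		return -EACCES;
	return 0;
}

int main(void)
{
	struct shm_policy_info info = { .creator_pid = 1234,
					.create_time = 0,
					.creator_alive = false };
	printf("attach -> %ld\n", do_attach(&info));
	return 0;
}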
58483 diff -urNp linux-2.6.32.41/kernel/acct.c linux-2.6.32.41/kernel/acct.c
58484 --- linux-2.6.32.41/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58485 +++ linux-2.6.32.41/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58486 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58487 */
58488 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58489 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58490 - file->f_op->write(file, (char *)&ac,
58491 + file->f_op->write(file, (__force char __user *)&ac,
58492 sizeof(acct_t), &file->f_pos);
58493 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58494 set_fs(fs);
58495 diff -urNp linux-2.6.32.41/kernel/audit.c linux-2.6.32.41/kernel/audit.c
58496 --- linux-2.6.32.41/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58497 +++ linux-2.6.32.41/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58498 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58499 3) suppressed due to audit_rate_limit
58500 4) suppressed due to audit_backlog_limit
58501 */
58502 -static atomic_t audit_lost = ATOMIC_INIT(0);
58503 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58504
58505 /* The netlink socket. */
58506 static struct sock *audit_sock;
58507 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58508 unsigned long now;
58509 int print;
58510
58511 - atomic_inc(&audit_lost);
58512 + atomic_inc_unchecked(&audit_lost);
58513
58514 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58515
58516 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58517 printk(KERN_WARNING
58518 "audit: audit_lost=%d audit_rate_limit=%d "
58519 "audit_backlog_limit=%d\n",
58520 - atomic_read(&audit_lost),
58521 + atomic_read_unchecked(&audit_lost),
58522 audit_rate_limit,
58523 audit_backlog_limit);
58524 audit_panic(message);
58525 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58526 status_set.pid = audit_pid;
58527 status_set.rate_limit = audit_rate_limit;
58528 status_set.backlog_limit = audit_backlog_limit;
58529 - status_set.lost = atomic_read(&audit_lost);
58530 + status_set.lost = atomic_read_unchecked(&audit_lost);
58531 status_set.backlog = skb_queue_len(&audit_skb_queue);
58532 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58533 &status_set, sizeof(status_set));
58534 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58535 spin_unlock_irq(&tsk->sighand->siglock);
58536 }
58537 read_unlock(&tasklist_lock);
58538 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58539 - &s, sizeof(s));
58540 +
58541 + if (!err)
58542 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58543 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58544 break;
58545 }
58546 case AUDIT_TTY_SET: {
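Note on the kernel/audit.c hunks above: audit_lost is converted from atomic_t to atomic_unchecked_t. With the overflow-checked atomics introduced elsewhere in this patch, plain atomic_t arithmetic is checked for wrap-around; audit_lost is a pure statistics counter where a wrap is harmless, so it opts out via the _unchecked variants. A userspace analogue of the distinction, using C11 atomics and stand-in names:

/* Sketch only: a wrapping statistics counter vs. a saturating,
 * refcount-style increment where overflow would be a bug. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint stats_counter;   /* "unchecked": wrap is acceptable */
static atomic_int  refcount = 1;    /* overflow here would be a bug    */

static void refcount_inc_checked(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != INT_MAX &&
	       !atomic_compare_exchange_weak(v, &old, old + 1))
		;   /* saturate at INT_MAX instead of wrapping */
}

int main(void)
{
	atomic_fetch_add(&stats_counter, 1);
	refcount_inc_checked(&refcount);
	printf("stats=%u refs=%d\n", atomic_load(&stats_counter),
	       atomic_load(&refcount));
	return 0;
}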
58547 diff -urNp linux-2.6.32.41/kernel/auditsc.c linux-2.6.32.41/kernel/auditsc.c
58548 --- linux-2.6.32.41/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58549 +++ linux-2.6.32.41/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58550 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58551 }
58552
58553 /* global counter which is incremented every time something logs in */
58554 -static atomic_t session_id = ATOMIC_INIT(0);
58555 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58556
58557 /**
58558 * audit_set_loginuid - set a task's audit_context loginuid
58559 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58560 */
58561 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58562 {
58563 - unsigned int sessionid = atomic_inc_return(&session_id);
58564 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58565 struct audit_context *context = task->audit_context;
58566
58567 if (context && context->in_syscall) {
58568 diff -urNp linux-2.6.32.41/kernel/capability.c linux-2.6.32.41/kernel/capability.c
58569 --- linux-2.6.32.41/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58570 +++ linux-2.6.32.41/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58571 @@ -305,10 +305,26 @@ int capable(int cap)
58572 BUG();
58573 }
58574
58575 - if (security_capable(cap) == 0) {
58576 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58577 current->flags |= PF_SUPERPRIV;
58578 return 1;
58579 }
58580 return 0;
58581 }
58582 +
58583 +int capable_nolog(int cap)
58584 +{
58585 + if (unlikely(!cap_valid(cap))) {
58586 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58587 + BUG();
58588 + }
58589 +
58590 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58591 + current->flags |= PF_SUPERPRIV;
58592 + return 1;
58593 + }
58594 + return 0;
58595 +}
58596 +
58597 EXPORT_SYMBOL(capable);
58598 +EXPORT_SYMBOL(capable_nolog);
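Note on the kernel/capability.c hunk above: capable_nolog() performs the same validity and security_capable() checks as capable(), but consults gr_is_capable_nolog() so the grsecurity side can skip logging the query; it is meant for callers that probe capabilities speculatively. A userspace sketch of the logging vs. silent-probe split, with stand-in function names:

/* Illustrative only: same check, caller chooses whether a denial
 * should be reported. */
#include <stdbool.h>
#include <stdio.h>

static bool policy_allows(int cap) { return cap == 0; }   /* stand-in policy */

static bool example_capable(int cap)
{
	bool ok = policy_allows(cap);

	if (!ok)
		fprintf(stderr, "denied capability %d\n", cap);   /* audited path */
	return ok;
}

static bool example_capable_nolog(int cap)
{
	return policy_allows(cap);                                /* silent probe */
}

int main(void)
{
	/* Speculative probe first; only the final, authoritative check logs. */
	if (example_capable_nolog(7))
		puts("probe says yes, no log entry generated");
	if (!example_capable(7))
		puts("final check denied (and logged)");
	return 0;
}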
58599 diff -urNp linux-2.6.32.41/kernel/cgroup.c linux-2.6.32.41/kernel/cgroup.c
58600 --- linux-2.6.32.41/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58601 +++ linux-2.6.32.41/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58602 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58603 struct hlist_head *hhead;
58604 struct cg_cgroup_link *link;
58605
58606 + pax_track_stack();
58607 +
58608 /* First see if we already have a cgroup group that matches
58609 * the desired set */
58610 read_lock(&css_set_lock);
58611 diff -urNp linux-2.6.32.41/kernel/configs.c linux-2.6.32.41/kernel/configs.c
58612 --- linux-2.6.32.41/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58613 +++ linux-2.6.32.41/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58614 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58615 struct proc_dir_entry *entry;
58616
58617 /* create the current config file */
58618 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58619 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58620 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58621 + &ikconfig_file_ops);
58622 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58623 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58624 + &ikconfig_file_ops);
58625 +#endif
58626 +#else
58627 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58628 &ikconfig_file_ops);
58629 +#endif
58630 +
58631 if (!entry)
58632 return -ENOMEM;
58633
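Note on the kernel/configs.c hunk above: it narrows the /proc/config.gz permissions depending on the GRKERNSEC_PROC_* options, owner-only (S_IRUSR), owner plus group (S_IRUSR | S_IRGRP), or the stock world-readable S_IRUGO. A quick userspace check of the octal values involved (S_IRUGO itself is a kernel macro, so it is spelled out here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("S_IRUSR               = %04o\n", S_IRUSR);             /* 0400 */
	printf("S_IRUSR|S_IRGRP       = %04o\n", S_IRUSR | S_IRGRP);   /* 0440 */
	printf("S_IRUGO (USR|GRP|OTH) = %04o\n",
	       S_IRUSR | S_IRGRP | S_IROTH);                           /* 0444 */
	return 0;
}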
58634 diff -urNp linux-2.6.32.41/kernel/cpu.c linux-2.6.32.41/kernel/cpu.c
58635 --- linux-2.6.32.41/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58636 +++ linux-2.6.32.41/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58637 @@ -19,7 +19,7 @@
58638 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58639 static DEFINE_MUTEX(cpu_add_remove_lock);
58640
58641 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58642 +static RAW_NOTIFIER_HEAD(cpu_chain);
58643
58644 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58645 * Should always be manipulated under cpu_add_remove_lock
58646 diff -urNp linux-2.6.32.41/kernel/cred.c linux-2.6.32.41/kernel/cred.c
58647 --- linux-2.6.32.41/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58648 +++ linux-2.6.32.41/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58649 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58650 */
58651 void __put_cred(struct cred *cred)
58652 {
58653 + pax_track_stack();
58654 +
58655 kdebug("__put_cred(%p{%d,%d})", cred,
58656 atomic_read(&cred->usage),
58657 read_cred_subscribers(cred));
58658 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58659 {
58660 struct cred *cred;
58661
58662 + pax_track_stack();
58663 +
58664 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58665 atomic_read(&tsk->cred->usage),
58666 read_cred_subscribers(tsk->cred));
58667 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58668 {
58669 const struct cred *cred;
58670
58671 + pax_track_stack();
58672 +
58673 rcu_read_lock();
58674
58675 do {
58676 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58677 {
58678 struct cred *new;
58679
58680 + pax_track_stack();
58681 +
58682 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58683 if (!new)
58684 return NULL;
58685 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58686 const struct cred *old;
58687 struct cred *new;
58688
58689 + pax_track_stack();
58690 +
58691 validate_process_creds();
58692
58693 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58694 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58695 struct thread_group_cred *tgcred = NULL;
58696 struct cred *new;
58697
58698 + pax_track_stack();
58699 +
58700 #ifdef CONFIG_KEYS
58701 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58702 if (!tgcred)
58703 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58704 struct cred *new;
58705 int ret;
58706
58707 + pax_track_stack();
58708 +
58709 mutex_init(&p->cred_guard_mutex);
58710
58711 if (
58712 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58713 struct task_struct *task = current;
58714 const struct cred *old = task->real_cred;
58715
58716 + pax_track_stack();
58717 +
58718 kdebug("commit_creds(%p{%d,%d})", new,
58719 atomic_read(&new->usage),
58720 read_cred_subscribers(new));
58721 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58722
58723 get_cred(new); /* we will require a ref for the subj creds too */
58724
58725 + gr_set_role_label(task, new->uid, new->gid);
58726 +
58727 /* dumpability changes */
58728 if (old->euid != new->euid ||
58729 old->egid != new->egid ||
58730 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58731 */
58732 void abort_creds(struct cred *new)
58733 {
58734 + pax_track_stack();
58735 +
58736 kdebug("abort_creds(%p{%d,%d})", new,
58737 atomic_read(&new->usage),
58738 read_cred_subscribers(new));
58739 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
58740 {
58741 const struct cred *old = current->cred;
58742
58743 + pax_track_stack();
58744 +
58745 kdebug("override_creds(%p{%d,%d})", new,
58746 atomic_read(&new->usage),
58747 read_cred_subscribers(new));
58748 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58749 {
58750 const struct cred *override = current->cred;
58751
58752 + pax_track_stack();
58753 +
58754 kdebug("revert_creds(%p{%d,%d})", old,
58755 atomic_read(&old->usage),
58756 read_cred_subscribers(old));
58757 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58758 const struct cred *old;
58759 struct cred *new;
58760
58761 + pax_track_stack();
58762 +
58763 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58764 if (!new)
58765 return NULL;
58766 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58767 */
58768 int set_security_override(struct cred *new, u32 secid)
58769 {
58770 + pax_track_stack();
58771 +
58772 return security_kernel_act_as(new, secid);
58773 }
58774 EXPORT_SYMBOL(set_security_override);
58775 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58776 u32 secid;
58777 int ret;
58778
58779 + pax_track_stack();
58780 +
58781 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58782 if (ret < 0)
58783 return ret;
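Note on the kernel/cred.c hunks above: they only add pax_track_stack() calls at the top of the credential helpers, which have comparatively large stack frames. The helper is defined elsewhere in the patch; conceptually it records how deep the kernel stack has grown so the used region can later be sanitized. The userspace sketch below is meant only to convey that idea and is not the real implementation.

/* Conceptual sketch: record the lowest stack position observed. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

#define track_stack() do {                       \
	volatile char probe;                     \
	uintptr_t sp = (uintptr_t)&probe;        \
	if (sp < lowest_sp)                      \
		lowest_sp = sp;                  \
} while (0)

static void deep_helper(void)
{
	char buf[512];          /* large frame, like the cred paths above */

	buf[0] = 0;
	track_stack();
	(void)buf;
}

int main(void)
{
	track_stack();
	deep_helper();
	printf("stack high-water mark: %#lx\n", (unsigned long)lowest_sp);
	return 0;
}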
58784 diff -urNp linux-2.6.32.41/kernel/exit.c linux-2.6.32.41/kernel/exit.c
58785 --- linux-2.6.32.41/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
58786 +++ linux-2.6.32.41/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
58787 @@ -55,6 +55,10 @@
58788 #include <asm/pgtable.h>
58789 #include <asm/mmu_context.h>
58790
58791 +#ifdef CONFIG_GRKERNSEC
58792 +extern rwlock_t grsec_exec_file_lock;
58793 +#endif
58794 +
58795 static void exit_mm(struct task_struct * tsk);
58796
58797 static void __unhash_process(struct task_struct *p)
58798 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
58799 struct task_struct *leader;
58800 int zap_leader;
58801 repeat:
58802 + gr_del_task_from_ip_table(p);
58803 +
58804 tracehook_prepare_release_task(p);
58805 /* don't need to get the RCU readlock here - the process is dead and
58806 * can't be modifying its own credentials */
58807 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
58808 {
58809 write_lock_irq(&tasklist_lock);
58810
58811 +#ifdef CONFIG_GRKERNSEC
58812 + write_lock(&grsec_exec_file_lock);
58813 + if (current->exec_file) {
58814 + fput(current->exec_file);
58815 + current->exec_file = NULL;
58816 + }
58817 + write_unlock(&grsec_exec_file_lock);
58818 +#endif
58819 +
58820 ptrace_unlink(current);
58821 /* Reparent to init */
58822 current->real_parent = current->parent = kthreadd_task;
58823 list_move_tail(&current->sibling, &current->real_parent->children);
58824
58825 + gr_set_kernel_label(current);
58826 +
58827 /* Set the exit signal to SIGCHLD so we signal init on exit */
58828 current->exit_signal = SIGCHLD;
58829
58830 @@ -397,7 +414,7 @@ int allow_signal(int sig)
58831 * know it'll be handled, so that they don't get converted to
58832 * SIGKILL or just silently dropped.
58833 */
58834 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
58835 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
58836 recalc_sigpending();
58837 spin_unlock_irq(&current->sighand->siglock);
58838 return 0;
58839 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
58840 vsnprintf(current->comm, sizeof(current->comm), name, args);
58841 va_end(args);
58842
58843 +#ifdef CONFIG_GRKERNSEC
58844 + write_lock(&grsec_exec_file_lock);
58845 + if (current->exec_file) {
58846 + fput(current->exec_file);
58847 + current->exec_file = NULL;
58848 + }
58849 + write_unlock(&grsec_exec_file_lock);
58850 +#endif
58851 +
58852 + gr_set_kernel_label(current);
58853 +
58854 /*
58855 * If we were started as result of loading a module, close all of the
58856 * user space pages. We don't need them, and if we didn't close them
58857 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
58858 struct task_struct *tsk = current;
58859 int group_dead;
58860
58861 - profile_task_exit(tsk);
58862 -
58863 - WARN_ON(atomic_read(&tsk->fs_excl));
58864 -
58865 + /*
58866 + * Check this first since set_fs() below depends on
58867 + * current_thread_info(), which we better not access when we're in
58868 + * interrupt context. Other than that, we want to do the set_fs()
58869 + * as early as possible.
58870 + */
58871 if (unlikely(in_interrupt()))
58872 panic("Aiee, killing interrupt handler!");
58873 - if (unlikely(!tsk->pid))
58874 - panic("Attempted to kill the idle task!");
58875
58876 /*
58877 - * If do_exit is called because this processes oopsed, it's possible
58878 + * If do_exit is called because this processes Oops'ed, it's possible
58879 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
58880 * continuing. Amongst other possible reasons, this is to prevent
58881 * mm_release()->clear_child_tid() from writing to a user-controlled
58882 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
58883 */
58884 set_fs(USER_DS);
58885
58886 + profile_task_exit(tsk);
58887 +
58888 + WARN_ON(atomic_read(&tsk->fs_excl));
58889 +
58890 + if (unlikely(!tsk->pid))
58891 + panic("Attempted to kill the idle task!");
58892 +
58893 tracehook_report_exit(&code);
58894
58895 validate_creds_for_do_exit(tsk);
58896 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
58897 tsk->exit_code = code;
58898 taskstats_exit(tsk, group_dead);
58899
58900 + gr_acl_handle_psacct(tsk, code);
58901 + gr_acl_handle_exit();
58902 +
58903 exit_mm(tsk);
58904
58905 if (group_dead)
58906 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
58907
58908 if (unlikely(wo->wo_flags & WNOWAIT)) {
58909 int exit_code = p->exit_code;
58910 - int why, status;
58911 + int why;
58912
58913 get_task_struct(p);
58914 read_unlock(&tasklist_lock);
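Note on the kernel/exit.c hunks above: besides reordering do_exit() so set_fs(USER_DS) runs before anything that touches current_thread_info(), they make reparent_to_kthreadd() and daemonize() drop the task's tracked exec_file reference under the grsec_exec_file_lock writer lock before relabeling the task as a kernel thread. A userspace sketch of that drop-under-writer-lock pattern, with hypothetical names (link with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tracked_file { int refs; };

static pthread_rwlock_t exec_file_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct tracked_file *exec_file;

static void put_file(struct tracked_file *f)
{
	if (--f->refs == 0)
		free(f);
}

static void become_kernel_thread(void)
{
	pthread_rwlock_wrlock(&exec_file_lock);
	if (exec_file) {
		put_file(exec_file);     /* matches fput(current->exec_file) */
		exec_file = NULL;
	}
	pthread_rwlock_unlock(&exec_file_lock);
}

int main(void)
{
	exec_file = calloc(1, sizeof(*exec_file));
	exec_file->refs = 1;
	become_kernel_thread();
	printf("exec_file is %s\n", exec_file ? "still set" : "cleared");
	return 0;
}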
58915 diff -urNp linux-2.6.32.41/kernel/fork.c linux-2.6.32.41/kernel/fork.c
58916 --- linux-2.6.32.41/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
58917 +++ linux-2.6.32.41/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
58918 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
58919 *stackend = STACK_END_MAGIC; /* for overflow detection */
58920
58921 #ifdef CONFIG_CC_STACKPROTECTOR
58922 - tsk->stack_canary = get_random_int();
58923 + tsk->stack_canary = pax_get_random_long();
58924 #endif
58925
58926 /* One for us, one for whoever does the "release_task()" (usually parent) */
58927 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
58928 mm->locked_vm = 0;
58929 mm->mmap = NULL;
58930 mm->mmap_cache = NULL;
58931 - mm->free_area_cache = oldmm->mmap_base;
58932 - mm->cached_hole_size = ~0UL;
58933 + mm->free_area_cache = oldmm->free_area_cache;
58934 + mm->cached_hole_size = oldmm->cached_hole_size;
58935 mm->map_count = 0;
58936 cpumask_clear(mm_cpumask(mm));
58937 mm->mm_rb = RB_ROOT;
58938 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
58939 tmp->vm_flags &= ~VM_LOCKED;
58940 tmp->vm_mm = mm;
58941 tmp->vm_next = tmp->vm_prev = NULL;
58942 + tmp->vm_mirror = NULL;
58943 anon_vma_link(tmp);
58944 file = tmp->vm_file;
58945 if (file) {
58946 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
58947 if (retval)
58948 goto out;
58949 }
58950 +
58951 +#ifdef CONFIG_PAX_SEGMEXEC
58952 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
58953 + struct vm_area_struct *mpnt_m;
58954 +
58955 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
58956 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
58957 +
58958 + if (!mpnt->vm_mirror)
58959 + continue;
58960 +
58961 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
58962 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
58963 + mpnt->vm_mirror = mpnt_m;
58964 + } else {
58965 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
58966 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
58967 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
58968 + mpnt->vm_mirror->vm_mirror = mpnt;
58969 + }
58970 + }
58971 + BUG_ON(mpnt_m);
58972 + }
58973 +#endif
58974 +
58975 /* a new mm has just been created */
58976 arch_dup_mmap(oldmm, mm);
58977 retval = 0;
58978 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
58979 write_unlock(&fs->lock);
58980 return -EAGAIN;
58981 }
58982 - fs->users++;
58983 + atomic_inc(&fs->users);
58984 write_unlock(&fs->lock);
58985 return 0;
58986 }
58987 tsk->fs = copy_fs_struct(fs);
58988 if (!tsk->fs)
58989 return -ENOMEM;
58990 + gr_set_chroot_entries(tsk, &tsk->fs->root);
58991 return 0;
58992 }
58993
58994 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
58995 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
58996 #endif
58997 retval = -EAGAIN;
58998 +
58999 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59000 +
59001 if (atomic_read(&p->real_cred->user->processes) >=
59002 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59003 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59004 - p->real_cred->user != INIT_USER)
59005 + if (p->real_cred->user != INIT_USER &&
59006 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59007 goto bad_fork_free;
59008 }
59009
59010 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59011 goto bad_fork_free_pid;
59012 }
59013
59014 + gr_copy_label(p);
59015 +
59016 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59017 /*
59018 * Clear TID on mm_release()?
59019 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59020 bad_fork_free:
59021 free_task(p);
59022 fork_out:
59023 + gr_log_forkfail(retval);
59024 +
59025 return ERR_PTR(retval);
59026 }
59027
59028 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59029 if (clone_flags & CLONE_PARENT_SETTID)
59030 put_user(nr, parent_tidptr);
59031
59032 + gr_handle_brute_check();
59033 +
59034 if (clone_flags & CLONE_VFORK) {
59035 p->vfork_done = &vfork;
59036 init_completion(&vfork);
59037 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59038 return 0;
59039
59040 /* don't need lock here; in the worst case we'll do useless copy */
59041 - if (fs->users == 1)
59042 + if (atomic_read(&fs->users) == 1)
59043 return 0;
59044
59045 *new_fsp = copy_fs_struct(fs);
59046 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59047 fs = current->fs;
59048 write_lock(&fs->lock);
59049 current->fs = new_fs;
59050 - if (--fs->users)
59051 + gr_set_chroot_entries(current, &current->fs->root);
59052 + if (atomic_dec_return(&fs->users))
59053 new_fs = NULL;
59054 else
59055 new_fs = fs;
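Note on the kernel/fork.c hunks above: in copy_process() the RLIMIT_NPROC test is reordered so the cheap "is this INIT_USER?" comparison comes first and the capable() probes, which have logging and PF_SUPERPRIV side effects, run last; with &&, they are skipped entirely whenever the left-hand test already decides the outcome. A small userspace demonstration of that short-circuit behaviour (the capability numbers are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool expensive_capable(int cap)
{
	printf("capable(%d) evaluated\n", cap);   /* side effect: logging */
	return true;
}

int main(void)
{
	bool is_init_user = true;

	/* Patch order: cheap identity test first, capability probes last. */
	if (!is_init_user && !expensive_capable(24) && !expensive_capable(21))
		printf("fork denied\n");
	else
		printf("fork allowed, no capability probe needed\n");
	return 0;
}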
59056 diff -urNp linux-2.6.32.41/kernel/futex.c linux-2.6.32.41/kernel/futex.c
59057 --- linux-2.6.32.41/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59058 +++ linux-2.6.32.41/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59059 @@ -54,6 +54,7 @@
59060 #include <linux/mount.h>
59061 #include <linux/pagemap.h>
59062 #include <linux/syscalls.h>
59063 +#include <linux/ptrace.h>
59064 #include <linux/signal.h>
59065 #include <linux/module.h>
59066 #include <linux/magic.h>
59067 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59068 struct page *page;
59069 int err;
59070
59071 +#ifdef CONFIG_PAX_SEGMEXEC
59072 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59073 + return -EFAULT;
59074 +#endif
59075 +
59076 /*
59077 * The futex address must be "naturally" aligned.
59078 */
59079 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59080 struct futex_q q;
59081 int ret;
59082
59083 + pax_track_stack();
59084 +
59085 if (!bitset)
59086 return -EINVAL;
59087
59088 @@ -1841,7 +1849,7 @@ retry:
59089
59090 restart = &current_thread_info()->restart_block;
59091 restart->fn = futex_wait_restart;
59092 - restart->futex.uaddr = (u32 *)uaddr;
59093 + restart->futex.uaddr = uaddr;
59094 restart->futex.val = val;
59095 restart->futex.time = abs_time->tv64;
59096 restart->futex.bitset = bitset;
59097 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59098 struct futex_q q;
59099 int res, ret;
59100
59101 + pax_track_stack();
59102 +
59103 if (!bitset)
59104 return -EINVAL;
59105
59106 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59107 {
59108 struct robust_list_head __user *head;
59109 unsigned long ret;
59110 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59111 const struct cred *cred = current_cred(), *pcred;
59112 +#endif
59113
59114 if (!futex_cmpxchg_enabled)
59115 return -ENOSYS;
59116 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59117 if (!p)
59118 goto err_unlock;
59119 ret = -EPERM;
59120 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59121 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59122 + goto err_unlock;
59123 +#else
59124 pcred = __task_cred(p);
59125 if (cred->euid != pcred->euid &&
59126 cred->euid != pcred->uid &&
59127 !capable(CAP_SYS_PTRACE))
59128 goto err_unlock;
59129 +#endif
59130 head = p->robust_list;
59131 rcu_read_unlock();
59132 }
59133 @@ -2459,7 +2476,7 @@ retry:
59134 */
59135 static inline int fetch_robust_entry(struct robust_list __user **entry,
59136 struct robust_list __user * __user *head,
59137 - int *pi)
59138 + unsigned int *pi)
59139 {
59140 unsigned long uentry;
59141
59142 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59143 {
59144 u32 curval;
59145 int i;
59146 + mm_segment_t oldfs;
59147
59148 /*
59149 * This will fail and we want it. Some arch implementations do
59150 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59151 * implementation, the non functional ones will return
59152 * -ENOSYS.
59153 */
59154 + oldfs = get_fs();
59155 + set_fs(USER_DS);
59156 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59157 + set_fs(oldfs);
59158 if (curval == -EFAULT)
59159 futex_cmpxchg_enabled = 1;
59160
59161 diff -urNp linux-2.6.32.41/kernel/futex_compat.c linux-2.6.32.41/kernel/futex_compat.c
59162 --- linux-2.6.32.41/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59163 +++ linux-2.6.32.41/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59164 @@ -10,6 +10,7 @@
59165 #include <linux/compat.h>
59166 #include <linux/nsproxy.h>
59167 #include <linux/futex.h>
59168 +#include <linux/ptrace.h>
59169
59170 #include <asm/uaccess.h>
59171
59172 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59173 {
59174 struct compat_robust_list_head __user *head;
59175 unsigned long ret;
59176 - const struct cred *cred = current_cred(), *pcred;
59177 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59178 + const struct cred *cred = current_cred();
59179 + const struct cred *pcred;
59180 +#endif
59181
59182 if (!futex_cmpxchg_enabled)
59183 return -ENOSYS;
59184 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59185 if (!p)
59186 goto err_unlock;
59187 ret = -EPERM;
59188 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59189 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59190 + goto err_unlock;
59191 +#else
59192 pcred = __task_cred(p);
59193 if (cred->euid != pcred->euid &&
59194 cred->euid != pcred->uid &&
59195 !capable(CAP_SYS_PTRACE))
59196 goto err_unlock;
59197 +#endif
59198 head = p->compat_robust_list;
59199 read_unlock(&tasklist_lock);
59200 }
59201 diff -urNp linux-2.6.32.41/kernel/gcov/base.c linux-2.6.32.41/kernel/gcov/base.c
59202 --- linux-2.6.32.41/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59203 +++ linux-2.6.32.41/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59204 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59205 }
59206
59207 #ifdef CONFIG_MODULES
59208 -static inline int within(void *addr, void *start, unsigned long size)
59209 -{
59210 - return ((addr >= start) && (addr < start + size));
59211 -}
59212 -
59213 /* Update list and generate events when modules are unloaded. */
59214 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59215 void *data)
59216 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59217 prev = NULL;
59218 /* Remove entries located in module from linked list. */
59219 for (info = gcov_info_head; info; info = info->next) {
59220 - if (within(info, mod->module_core, mod->core_size)) {
59221 + if (within_module_core_rw((unsigned long)info, mod)) {
59222 if (prev)
59223 prev->next = info->next;
59224 else
59225 diff -urNp linux-2.6.32.41/kernel/hrtimer.c linux-2.6.32.41/kernel/hrtimer.c
59226 --- linux-2.6.32.41/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59227 +++ linux-2.6.32.41/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59228 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59229 local_irq_restore(flags);
59230 }
59231
59232 -static void run_hrtimer_softirq(struct softirq_action *h)
59233 +static void run_hrtimer_softirq(void)
59234 {
59235 hrtimer_peek_ahead_timers();
59236 }
59237 diff -urNp linux-2.6.32.41/kernel/kallsyms.c linux-2.6.32.41/kernel/kallsyms.c
59238 --- linux-2.6.32.41/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59239 +++ linux-2.6.32.41/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59240 @@ -11,6 +11,9 @@
59241 * Changed the compression method from stem compression to "table lookup"
59242 * compression (see scripts/kallsyms.c for a more complete description)
59243 */
59244 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59245 +#define __INCLUDED_BY_HIDESYM 1
59246 +#endif
59247 #include <linux/kallsyms.h>
59248 #include <linux/module.h>
59249 #include <linux/init.h>
59250 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59251
59252 static inline int is_kernel_inittext(unsigned long addr)
59253 {
59254 + if (system_state != SYSTEM_BOOTING)
59255 + return 0;
59256 +
59257 if (addr >= (unsigned long)_sinittext
59258 && addr <= (unsigned long)_einittext)
59259 return 1;
59260 return 0;
59261 }
59262
59263 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59264 +#ifdef CONFIG_MODULES
59265 +static inline int is_module_text(unsigned long addr)
59266 +{
59267 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59268 + return 1;
59269 +
59270 + addr = ktla_ktva(addr);
59271 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59272 +}
59273 +#else
59274 +static inline int is_module_text(unsigned long addr)
59275 +{
59276 + return 0;
59277 +}
59278 +#endif
59279 +#endif
59280 +
59281 static inline int is_kernel_text(unsigned long addr)
59282 {
59283 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59284 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59285
59286 static inline int is_kernel(unsigned long addr)
59287 {
59288 +
59289 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59290 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59291 + return 1;
59292 +
59293 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59294 +#else
59295 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59296 +#endif
59297 +
59298 return 1;
59299 return in_gate_area_no_task(addr);
59300 }
59301
59302 static int is_ksym_addr(unsigned long addr)
59303 {
59304 +
59305 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59306 + if (is_module_text(addr))
59307 + return 0;
59308 +#endif
59309 +
59310 if (all_var)
59311 return is_kernel(addr);
59312
59313 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59314
59315 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59316 {
59317 - iter->name[0] = '\0';
59318 iter->nameoff = get_symbol_offset(new_pos);
59319 iter->pos = new_pos;
59320 }
59321 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59322 {
59323 struct kallsym_iter *iter = m->private;
59324
59325 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59326 + if (current_uid())
59327 + return 0;
59328 +#endif
59329 +
59330 /* Some debugging symbols have no name. Ignore them. */
59331 if (!iter->name[0])
59332 return 0;
59333 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59334 struct kallsym_iter *iter;
59335 int ret;
59336
59337 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59338 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59339 if (!iter)
59340 return -ENOMEM;
59341 reset_iter(iter, 0);
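Note on the kernel/kallsyms.c hunks above: with GRKERNSEC_HIDESYM, the seq_file ->show() handler returns 0 for non-root readers, which silently skips every record, and the iterator is now allocated with kzalloc() rather than kmalloc(), zeroing it up front. A userspace analogue of the "return 0 to hide the record" convention, with invented names:

#include <stdio.h>
#include <unistd.h>

struct sym { const char *name; unsigned long addr; };

static int show_sym(FILE *out, const struct sym *s)
{
	if (getuid() != 0)
		return 0;               /* hide the record, but report success */
	return fprintf(out, "%016lx %s\n", s->addr, s->name) < 0 ? -1 : 0;
}

int main(void)
{
	struct sym s = { "example_symbol", 0xc1000000UL };

	return show_sym(stdout, &s) ? 1 : 0;
}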
59342 diff -urNp linux-2.6.32.41/kernel/kgdb.c linux-2.6.32.41/kernel/kgdb.c
59343 --- linux-2.6.32.41/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59344 +++ linux-2.6.32.41/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59345 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59346 /* Guard for recursive entry */
59347 static int exception_level;
59348
59349 -static struct kgdb_io *kgdb_io_ops;
59350 +static const struct kgdb_io *kgdb_io_ops;
59351 static DEFINE_SPINLOCK(kgdb_registration_lock);
59352
59353 /* kgdb console driver is loaded */
59354 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59355 */
59356 static atomic_t passive_cpu_wait[NR_CPUS];
59357 static atomic_t cpu_in_kgdb[NR_CPUS];
59358 -atomic_t kgdb_setting_breakpoint;
59359 +atomic_unchecked_t kgdb_setting_breakpoint;
59360
59361 struct task_struct *kgdb_usethread;
59362 struct task_struct *kgdb_contthread;
59363 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59364 sizeof(unsigned long)];
59365
59366 /* to keep track of the CPU which is doing the single stepping*/
59367 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59368 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59369
59370 /*
59371 * If you are debugging a problem where roundup (the collection of
59372 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59373 return 0;
59374 if (kgdb_connected)
59375 return 1;
59376 - if (atomic_read(&kgdb_setting_breakpoint))
59377 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59378 return 1;
59379 if (print_wait)
59380 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59381 @@ -1426,8 +1426,8 @@ acquirelock:
59382 * instance of the exception handler wanted to come into the
59383 * debugger on a different CPU via a single step
59384 */
59385 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59386 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59387 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59388 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59389
59390 atomic_set(&kgdb_active, -1);
59391 touch_softlockup_watchdog();
59392 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59393 *
59394 * Register it with the KGDB core.
59395 */
59396 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59397 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59398 {
59399 int err;
59400
59401 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59402 *
59403 * Unregister it with the KGDB core.
59404 */
59405 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59406 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59407 {
59408 BUG_ON(kgdb_connected);
59409
59410 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59411 */
59412 void kgdb_breakpoint(void)
59413 {
59414 - atomic_set(&kgdb_setting_breakpoint, 1);
59415 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59416 wmb(); /* Sync point before breakpoint */
59417 arch_kgdb_breakpoint();
59418 wmb(); /* Sync point after breakpoint */
59419 - atomic_set(&kgdb_setting_breakpoint, 0);
59420 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59421 }
59422 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59423
59424 diff -urNp linux-2.6.32.41/kernel/kmod.c linux-2.6.32.41/kernel/kmod.c
59425 --- linux-2.6.32.41/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59426 +++ linux-2.6.32.41/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59427 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59428 * If module auto-loading support is disabled then this function
59429 * becomes a no-operation.
59430 */
59431 -int __request_module(bool wait, const char *fmt, ...)
59432 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59433 {
59434 - va_list args;
59435 char module_name[MODULE_NAME_LEN];
59436 unsigned int max_modprobes;
59437 int ret;
59438 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59439 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59440 static char *envp[] = { "HOME=/",
59441 "TERM=linux",
59442 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59443 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59444 if (ret)
59445 return ret;
59446
59447 - va_start(args, fmt);
59448 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59449 - va_end(args);
59450 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59451 if (ret >= MODULE_NAME_LEN)
59452 return -ENAMETOOLONG;
59453
59454 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59455 + if (!current_uid()) {
59456 + /* hack to workaround consolekit/udisks stupidity */
59457 + read_lock(&tasklist_lock);
59458 + if (!strcmp(current->comm, "mount") &&
59459 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59460 + read_unlock(&tasklist_lock);
59461 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59462 + return -EPERM;
59463 + }
59464 + read_unlock(&tasklist_lock);
59465 + }
59466 +#endif
59467 +
59468 /* If modprobe needs a service that is in a module, we get a recursive
59469 * loop. Limit the number of running kmod threads to max_threads/2 or
59470 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59471 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59472 atomic_dec(&kmod_concurrent);
59473 return ret;
59474 }
59475 +
59476 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59477 +{
59478 + va_list args;
59479 + int ret;
59480 +
59481 + va_start(args, fmt);
59482 + ret = ____request_module(wait, module_param, fmt, args);
59483 + va_end(args);
59484 +
59485 + return ret;
59486 +}
59487 +
59488 +int __request_module(bool wait, const char *fmt, ...)
59489 +{
59490 + va_list args;
59491 + int ret;
59492 +
59493 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59494 + if (current_uid()) {
59495 + char module_param[MODULE_NAME_LEN];
59496 +
59497 + memset(module_param, 0, sizeof(module_param));
59498 +
59499 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59500 +
59501 + va_start(args, fmt);
59502 + ret = ____request_module(wait, module_param, fmt, args);
59503 + va_end(args);
59504 +
59505 + return ret;
59506 + }
59507 +#endif
59508 +
59509 + va_start(args, fmt);
59510 + ret = ____request_module(wait, NULL, fmt, args);
59511 + va_end(args);
59512 +
59513 + return ret;
59514 +}
59515 +
59516 +
59517 EXPORT_SYMBOL(__request_module);
59518 #endif /* CONFIG_MODULES */
59519
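Note on the kernel/kmod.c rework above: __request_module() is split into a va_list core (____request_module) plus two front ends, so the MODHARDEN path can inject an extra "grsec_modharden_*" token into modprobe's argv while sharing the formatting and length check. A userspace sketch of that varargs refactoring; the names and the printed command line are illustrative, not the kernel's:

#include <stdarg.h>
#include <stdio.h>

static int request_helper(const char *extra_arg, const char *fmt, va_list ap)
{
	char name[64];

	if (vsnprintf(name, sizeof(name), fmt, ap) >= (int)sizeof(name))
		return -1;                       /* mirrors -ENAMETOOLONG */
	printf("modprobe -q -- %s %s\n", name, extra_arg ? extra_arg : "");
	return 0;
}

static int request_with_param(const char *extra_arg, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = request_helper(extra_arg, fmt, ap);
	va_end(ap);
	return ret;
}

static int request(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = request_helper(NULL, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	request("fs-%s", "ext4");
	request_with_param("grsec_modharden_normal1000_", "net-pf-%d", 10);
	return 0;
}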
59520 diff -urNp linux-2.6.32.41/kernel/kprobes.c linux-2.6.32.41/kernel/kprobes.c
59521 --- linux-2.6.32.41/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59522 +++ linux-2.6.32.41/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59523 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59524 * kernel image and loaded module images reside. This is required
59525 * so x86_64 can correctly handle the %rip-relative fixups.
59526 */
59527 - kip->insns = module_alloc(PAGE_SIZE);
59528 + kip->insns = module_alloc_exec(PAGE_SIZE);
59529 if (!kip->insns) {
59530 kfree(kip);
59531 return NULL;
59532 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59533 */
59534 if (!list_is_singular(&kprobe_insn_pages)) {
59535 list_del(&kip->list);
59536 - module_free(NULL, kip->insns);
59537 + module_free_exec(NULL, kip->insns);
59538 kfree(kip);
59539 }
59540 return 1;
59541 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59542 {
59543 int i, err = 0;
59544 unsigned long offset = 0, size = 0;
59545 - char *modname, namebuf[128];
59546 + char *modname, namebuf[KSYM_NAME_LEN];
59547 const char *symbol_name;
59548 void *addr;
59549 struct kprobe_blackpoint *kb;
59550 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59551 const char *sym = NULL;
59552 unsigned int i = *(loff_t *) v;
59553 unsigned long offset = 0;
59554 - char *modname, namebuf[128];
59555 + char *modname, namebuf[KSYM_NAME_LEN];
59556
59557 head = &kprobe_table[i];
59558 preempt_disable();
59559 diff -urNp linux-2.6.32.41/kernel/lockdep.c linux-2.6.32.41/kernel/lockdep.c
59560 --- linux-2.6.32.41/kernel/lockdep.c 2011-03-27 14:31:47.000000000 -0400
59561 +++ linux-2.6.32.41/kernel/lockdep.c 2011-04-17 15:56:46.000000000 -0400
59562 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59563 /*
59564 * Various lockdep statistics:
59565 */
59566 -atomic_t chain_lookup_hits;
59567 -atomic_t chain_lookup_misses;
59568 -atomic_t hardirqs_on_events;
59569 -atomic_t hardirqs_off_events;
59570 -atomic_t redundant_hardirqs_on;
59571 -atomic_t redundant_hardirqs_off;
59572 -atomic_t softirqs_on_events;
59573 -atomic_t softirqs_off_events;
59574 -atomic_t redundant_softirqs_on;
59575 -atomic_t redundant_softirqs_off;
59576 -atomic_t nr_unused_locks;
59577 -atomic_t nr_cyclic_checks;
59578 -atomic_t nr_find_usage_forwards_checks;
59579 -atomic_t nr_find_usage_backwards_checks;
59580 +atomic_unchecked_t chain_lookup_hits;
59581 +atomic_unchecked_t chain_lookup_misses;
59582 +atomic_unchecked_t hardirqs_on_events;
59583 +atomic_unchecked_t hardirqs_off_events;
59584 +atomic_unchecked_t redundant_hardirqs_on;
59585 +atomic_unchecked_t redundant_hardirqs_off;
59586 +atomic_unchecked_t softirqs_on_events;
59587 +atomic_unchecked_t softirqs_off_events;
59588 +atomic_unchecked_t redundant_softirqs_on;
59589 +atomic_unchecked_t redundant_softirqs_off;
59590 +atomic_unchecked_t nr_unused_locks;
59591 +atomic_unchecked_t nr_cyclic_checks;
59592 +atomic_unchecked_t nr_find_usage_forwards_checks;
59593 +atomic_unchecked_t nr_find_usage_backwards_checks;
59594 #endif
59595
59596 /*
59597 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59598 int i;
59599 #endif
59600
59601 +#ifdef CONFIG_PAX_KERNEXEC
59602 + start = ktla_ktva(start);
59603 +#endif
59604 +
59605 /*
59606 * static variable?
59607 */
59608 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59609 */
59610 for_each_possible_cpu(i) {
59611 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59612 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59613 - + per_cpu_offset(i);
59614 + end = start + PERCPU_ENOUGH_ROOM;
59615
59616 if ((addr >= start) && (addr < end))
59617 return 1;
59618 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59619 if (!static_obj(lock->key)) {
59620 debug_locks_off();
59621 printk("INFO: trying to register non-static key.\n");
59622 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59623 printk("the code is fine but needs lockdep annotation.\n");
59624 printk("turning off the locking correctness validator.\n");
59625 dump_stack();
59626 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59627 if (!class)
59628 return 0;
59629 }
59630 - debug_atomic_inc((atomic_t *)&class->ops);
59631 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59632 if (very_verbose(class)) {
59633 printk("\nacquire class [%p] %s", class->key, class->name);
59634 if (class->name_version > 1)
59635 diff -urNp linux-2.6.32.41/kernel/lockdep_internals.h linux-2.6.32.41/kernel/lockdep_internals.h
59636 --- linux-2.6.32.41/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59637 +++ linux-2.6.32.41/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59638 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59639 /*
59640 * Various lockdep statistics:
59641 */
59642 -extern atomic_t chain_lookup_hits;
59643 -extern atomic_t chain_lookup_misses;
59644 -extern atomic_t hardirqs_on_events;
59645 -extern atomic_t hardirqs_off_events;
59646 -extern atomic_t redundant_hardirqs_on;
59647 -extern atomic_t redundant_hardirqs_off;
59648 -extern atomic_t softirqs_on_events;
59649 -extern atomic_t softirqs_off_events;
59650 -extern atomic_t redundant_softirqs_on;
59651 -extern atomic_t redundant_softirqs_off;
59652 -extern atomic_t nr_unused_locks;
59653 -extern atomic_t nr_cyclic_checks;
59654 -extern atomic_t nr_cyclic_check_recursions;
59655 -extern atomic_t nr_find_usage_forwards_checks;
59656 -extern atomic_t nr_find_usage_forwards_recursions;
59657 -extern atomic_t nr_find_usage_backwards_checks;
59658 -extern atomic_t nr_find_usage_backwards_recursions;
59659 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
59660 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
59661 -# define debug_atomic_read(ptr) atomic_read(ptr)
59662 +extern atomic_unchecked_t chain_lookup_hits;
59663 +extern atomic_unchecked_t chain_lookup_misses;
59664 +extern atomic_unchecked_t hardirqs_on_events;
59665 +extern atomic_unchecked_t hardirqs_off_events;
59666 +extern atomic_unchecked_t redundant_hardirqs_on;
59667 +extern atomic_unchecked_t redundant_hardirqs_off;
59668 +extern atomic_unchecked_t softirqs_on_events;
59669 +extern atomic_unchecked_t softirqs_off_events;
59670 +extern atomic_unchecked_t redundant_softirqs_on;
59671 +extern atomic_unchecked_t redundant_softirqs_off;
59672 +extern atomic_unchecked_t nr_unused_locks;
59673 +extern atomic_unchecked_t nr_cyclic_checks;
59674 +extern atomic_unchecked_t nr_cyclic_check_recursions;
59675 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
59676 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59677 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
59678 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59679 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59680 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59681 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59682 #else
59683 # define debug_atomic_inc(ptr) do { } while (0)
59684 # define debug_atomic_dec(ptr) do { } while (0)
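Note on the lockdep_internals.h hunk above: because every lockdep statistic is bumped through the debug_atomic_* macros, switching the counters to atomic_unchecked_t only requires changing the extern declarations and these three macro definitions. A userspace stand-in showing the same single-point-of-change indirection (types and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

typedef atomic_uint stat_unchecked_t;        /* stand-in for atomic_unchecked_t */

#define debug_atomic_inc(ptr)  atomic_fetch_add((ptr), 1)
#define debug_atomic_read(ptr) atomic_load(ptr)

static stat_unchecked_t chain_lookup_hits;

int main(void)
{
	debug_atomic_inc(&chain_lookup_hits);
	printf("chain_lookup_hits=%u\n", debug_atomic_read(&chain_lookup_hits));
	return 0;
}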
59685 diff -urNp linux-2.6.32.41/kernel/lockdep_proc.c linux-2.6.32.41/kernel/lockdep_proc.c
59686 --- linux-2.6.32.41/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59687 +++ linux-2.6.32.41/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59688 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59689
59690 static void print_name(struct seq_file *m, struct lock_class *class)
59691 {
59692 - char str[128];
59693 + char str[KSYM_NAME_LEN];
59694 const char *name = class->name;
59695
59696 if (!name) {
59697 diff -urNp linux-2.6.32.41/kernel/module.c linux-2.6.32.41/kernel/module.c
59698 --- linux-2.6.32.41/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59699 +++ linux-2.6.32.41/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59700 @@ -55,6 +55,7 @@
59701 #include <linux/async.h>
59702 #include <linux/percpu.h>
59703 #include <linux/kmemleak.h>
59704 +#include <linux/grsecurity.h>
59705
59706 #define CREATE_TRACE_POINTS
59707 #include <trace/events/module.h>
59708 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59709 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59710
59711 /* Bounds of module allocation, for speeding __module_address */
59712 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59713 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59714 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59715
59716 int register_module_notifier(struct notifier_block * nb)
59717 {
59718 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59719 return true;
59720
59721 list_for_each_entry_rcu(mod, &modules, list) {
59722 - struct symsearch arr[] = {
59723 + struct symsearch modarr[] = {
59724 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59725 NOT_GPL_ONLY, false },
59726 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59727 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59728 #endif
59729 };
59730
59731 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59732 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59733 return true;
59734 }
59735 return false;
59736 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59737 void *ptr;
59738 int cpu;
59739
59740 - if (align > PAGE_SIZE) {
59741 + if (align-1 >= PAGE_SIZE) {
59742 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59743 name, align, PAGE_SIZE);
59744 align = PAGE_SIZE;
59745 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59746 * /sys/module/foo/sections stuff
59747 * J. Corbet <corbet@lwn.net>
59748 */
59749 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59750 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59751
59752 static inline bool sect_empty(const Elf_Shdr *sect)
59753 {
59754 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59755 destroy_params(mod->kp, mod->num_kp);
59756
59757 /* This may be NULL, but that's OK */
59758 - module_free(mod, mod->module_init);
59759 + module_free(mod, mod->module_init_rw);
59760 + module_free_exec(mod, mod->module_init_rx);
59761 kfree(mod->args);
59762 if (mod->percpu)
59763 percpu_modfree(mod->percpu);
59764 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59765 percpu_modfree(mod->refptr);
59766 #endif
59767 /* Free lock-classes: */
59768 - lockdep_free_key_range(mod->module_core, mod->core_size);
59769 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59770 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59771
59772 /* Finally, free the core (containing the module structure) */
59773 - module_free(mod, mod->module_core);
59774 + module_free_exec(mod, mod->module_core_rx);
59775 + module_free(mod, mod->module_core_rw);
59776
59777 #ifdef CONFIG_MPU
59778 update_protections(current->mm);
59779 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59780 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59781 int ret = 0;
59782 const struct kernel_symbol *ksym;
59783 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59784 + int is_fs_load = 0;
59785 + int register_filesystem_found = 0;
59786 + char *p;
59787 +
59788 + p = strstr(mod->args, "grsec_modharden_fs");
59789 +
59790 + if (p) {
59791 + char *endptr = p + strlen("grsec_modharden_fs");
59792 + /* copy \0 as well */
59793 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
59794 + is_fs_load = 1;
59795 + }
59796 +#endif
59797 +
59798
59799 for (i = 1; i < n; i++) {
59800 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59801 + const char *name = strtab + sym[i].st_name;
59802 +
59803 + /* it's a real shame this will never get ripped and copied
59804 + upstream! ;(
59805 + */
59806 + if (is_fs_load && !strcmp(name, "register_filesystem"))
59807 + register_filesystem_found = 1;
59808 +#endif
59809 switch (sym[i].st_shndx) {
59810 case SHN_COMMON:
59811 /* We compiled with -fno-common. These are not
59812 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
59813 strtab + sym[i].st_name, mod);
59814 /* Ok if resolved. */
59815 if (ksym) {
59816 + pax_open_kernel();
59817 sym[i].st_value = ksym->value;
59818 + pax_close_kernel();
59819 break;
59820 }
59821
59822 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
59823 secbase = (unsigned long)mod->percpu;
59824 else
59825 secbase = sechdrs[sym[i].st_shndx].sh_addr;
59826 + pax_open_kernel();
59827 sym[i].st_value += secbase;
59828 + pax_close_kernel();
59829 break;
59830 }
59831 }
59832
59833 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59834 + if (is_fs_load && !register_filesystem_found) {
59835 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
59836 + ret = -EPERM;
59837 + }
59838 +#endif
59839 +
59840 return ret;
59841 }
59842
59843 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
59844 || s->sh_entsize != ~0UL
59845 || strstarts(secstrings + s->sh_name, ".init"))
59846 continue;
59847 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
59848 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59849 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
59850 + else
59851 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
59852 DEBUGP("\t%s\n", secstrings + s->sh_name);
59853 }
59854 - if (m == 0)
59855 - mod->core_text_size = mod->core_size;
59856 }
59857
59858 DEBUGP("Init section allocation order:\n");
59859 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
59860 || s->sh_entsize != ~0UL
59861 || !strstarts(secstrings + s->sh_name, ".init"))
59862 continue;
59863 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
59864 - | INIT_OFFSET_MASK);
59865 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59866 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
59867 + else
59868 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
59869 + s->sh_entsize |= INIT_OFFSET_MASK;
59870 DEBUGP("\t%s\n", secstrings + s->sh_name);
59871 }
59872 - if (m == 0)
59873 - mod->init_text_size = mod->init_size;
59874 }
59875 }
59876
59877 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
59878
59879 /* As per nm */
59880 static char elf_type(const Elf_Sym *sym,
59881 - Elf_Shdr *sechdrs,
59882 - const char *secstrings,
59883 - struct module *mod)
59884 + const Elf_Shdr *sechdrs,
59885 + const char *secstrings)
59886 {
59887 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
59888 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
59889 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
59890
59891 /* Put symbol section at end of init part of module. */
59892 symsect->sh_flags |= SHF_ALLOC;
59893 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
59894 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
59895 symindex) | INIT_OFFSET_MASK;
59896 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
59897
59898 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
59899 }
59900
59901 /* Append room for core symbols at end of core part. */
59902 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
59903 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
59904 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
59905 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
59906
59907 /* Put string table section at end of init part of module. */
59908 strsect->sh_flags |= SHF_ALLOC;
59909 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
59910 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
59911 strindex) | INIT_OFFSET_MASK;
59912 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
59913
59914 /* Append room for core symbols' strings at end of core part. */
59915 - *pstroffs = mod->core_size;
59916 + *pstroffs = mod->core_size_rx;
59917 __set_bit(0, strmap);
59918 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
59919 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
59920
59921 return symoffs;
59922 }
59923 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
59924 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59925 mod->strtab = (void *)sechdrs[strindex].sh_addr;
59926
59927 + pax_open_kernel();
59928 +
59929 /* Set types up while we still have access to sections. */
59930 for (i = 0; i < mod->num_symtab; i++)
59931 mod->symtab[i].st_info
59932 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
59933 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
59934
59935 - mod->core_symtab = dst = mod->module_core + symoffs;
59936 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
59937 src = mod->symtab;
59938 *dst = *src;
59939 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
59940 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
59941 }
59942 mod->core_num_syms = ndst;
59943
59944 - mod->core_strtab = s = mod->module_core + stroffs;
59945 + mod->core_strtab = s = mod->module_core_rx + stroffs;
59946 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
59947 if (test_bit(i, strmap))
59948 *++s = mod->strtab[i];
59949 +
59950 + pax_close_kernel();
59951 }
59952 #else
59953 static inline unsigned long layout_symtab(struct module *mod,
59954 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
59955 #endif
59956 }
59957
59958 -static void *module_alloc_update_bounds(unsigned long size)
59959 +static void *module_alloc_update_bounds_rw(unsigned long size)
59960 {
59961 void *ret = module_alloc(size);
59962
59963 if (ret) {
59964 /* Update module bounds. */
59965 - if ((unsigned long)ret < module_addr_min)
59966 - module_addr_min = (unsigned long)ret;
59967 - if ((unsigned long)ret + size > module_addr_max)
59968 - module_addr_max = (unsigned long)ret + size;
59969 + if ((unsigned long)ret < module_addr_min_rw)
59970 + module_addr_min_rw = (unsigned long)ret;
59971 + if ((unsigned long)ret + size > module_addr_max_rw)
59972 + module_addr_max_rw = (unsigned long)ret + size;
59973 + }
59974 + return ret;
59975 +}
59976 +
59977 +static void *module_alloc_update_bounds_rx(unsigned long size)
59978 +{
59979 + void *ret = module_alloc_exec(size);
59980 +
59981 + if (ret) {
59982 + /* Update module bounds. */
59983 + if ((unsigned long)ret < module_addr_min_rx)
59984 + module_addr_min_rx = (unsigned long)ret;
59985 + if ((unsigned long)ret + size > module_addr_max_rx)
59986 + module_addr_max_rx = (unsigned long)ret + size;
59987 }
59988 return ret;
59989 }
59990 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
59991 unsigned int i;
59992
59993 /* only scan the sections containing data */
59994 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
59995 - (unsigned long)mod->module_core,
59996 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
59997 + (unsigned long)mod->module_core_rw,
59998 sizeof(struct module), GFP_KERNEL);
59999
60000 for (i = 1; i < hdr->e_shnum; i++) {
60001 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60002 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60003 continue;
60004
60005 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60006 - (unsigned long)mod->module_core,
60007 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60008 + (unsigned long)mod->module_core_rw,
60009 sechdrs[i].sh_size, GFP_KERNEL);
60010 }
60011 }
60012 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60013 secstrings, &stroffs, strmap);
60014
60015 /* Do the allocs. */
60016 - ptr = module_alloc_update_bounds(mod->core_size);
60017 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60018 /*
60019 * The pointer to this block is stored in the module structure
60020 * which is inside the block. Just mark it as not being a
60021 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60022 err = -ENOMEM;
60023 goto free_percpu;
60024 }
60025 - memset(ptr, 0, mod->core_size);
60026 - mod->module_core = ptr;
60027 + memset(ptr, 0, mod->core_size_rw);
60028 + mod->module_core_rw = ptr;
60029
60030 - ptr = module_alloc_update_bounds(mod->init_size);
60031 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60032 /*
60033 * The pointer to this block is stored in the module structure
60034 * which is inside the block. This block doesn't need to be
60035 * scanned as it contains data and code that will be freed
60036 * after the module is initialized.
60037 */
60038 - kmemleak_ignore(ptr);
60039 - if (!ptr && mod->init_size) {
60040 + kmemleak_not_leak(ptr);
60041 + if (!ptr && mod->init_size_rw) {
60042 + err = -ENOMEM;
60043 + goto free_core_rw;
60044 + }
60045 + memset(ptr, 0, mod->init_size_rw);
60046 + mod->module_init_rw = ptr;
60047 +
60048 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60049 + kmemleak_not_leak(ptr);
60050 + if (!ptr) {
60051 err = -ENOMEM;
60052 - goto free_core;
60053 + goto free_init_rw;
60054 }
60055 - memset(ptr, 0, mod->init_size);
60056 - mod->module_init = ptr;
60057 +
60058 + pax_open_kernel();
60059 + memset(ptr, 0, mod->core_size_rx);
60060 + pax_close_kernel();
60061 + mod->module_core_rx = ptr;
60062 +
60063 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60064 + kmemleak_not_leak(ptr);
60065 + if (!ptr && mod->init_size_rx) {
60066 + err = -ENOMEM;
60067 + goto free_core_rx;
60068 + }
60069 +
60070 + pax_open_kernel();
60071 + memset(ptr, 0, mod->init_size_rx);
60072 + pax_close_kernel();
60073 + mod->module_init_rx = ptr;
60074
60075 /* Transfer each section which specifies SHF_ALLOC */
60076 DEBUGP("final section addresses:\n");
60077 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60078 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60079 continue;
60080
60081 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60082 - dest = mod->module_init
60083 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60084 - else
60085 - dest = mod->module_core + sechdrs[i].sh_entsize;
60086 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60087 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60088 + dest = mod->module_init_rw
60089 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60090 + else
60091 + dest = mod->module_init_rx
60092 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60093 + } else {
60094 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60095 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60096 + else
60097 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60098 + }
60099 +
60100 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60101
60102 - if (sechdrs[i].sh_type != SHT_NOBITS)
60103 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60104 - sechdrs[i].sh_size);
60105 +#ifdef CONFIG_PAX_KERNEXEC
60106 +#ifdef CONFIG_X86_64
60107 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60108 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60109 +#endif
60110 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60111 + pax_open_kernel();
60112 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60113 + pax_close_kernel();
60114 + } else
60115 +#endif
60116 +
60117 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60118 + }
60119 /* Update sh_addr to point to copy in image. */
60120 - sechdrs[i].sh_addr = (unsigned long)dest;
60121 +
60122 +#ifdef CONFIG_PAX_KERNEXEC
60123 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60124 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60125 + else
60126 +#endif
60127 +
60128 + sechdrs[i].sh_addr = (unsigned long)dest;
60129 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60130 }
60131 /* Module has been moved. */
60132 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60133 mod->name);
60134 if (!mod->refptr) {
60135 err = -ENOMEM;
60136 - goto free_init;
60137 + goto free_init_rx;
60138 }
60139 #endif
60140 /* Now we've moved module, initialize linked lists, etc. */
60141 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60142 /* Set up MODINFO_ATTR fields */
60143 setup_modinfo(mod, sechdrs, infoindex);
60144
60145 + mod->args = args;
60146 +
60147 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60148 + {
60149 + char *p, *p2;
60150 +
60151 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60152 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60153 + err = -EPERM;
60154 + goto cleanup;
60155 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60156 + p += strlen("grsec_modharden_normal");
60157 + p2 = strstr(p, "_");
60158 + if (p2) {
60159 + *p2 = '\0';
60160 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60161 + *p2 = '_';
60162 + }
60163 + err = -EPERM;
60164 + goto cleanup;
60165 + }
60166 + }
60167 +#endif
60168 +
60169 +
60170 /* Fix up syms, so that st_value is a pointer to location. */
60171 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60172 mod);
60173 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60174
60175 /* Now do relocations. */
60176 for (i = 1; i < hdr->e_shnum; i++) {
60177 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60178 unsigned int info = sechdrs[i].sh_info;
60179 + strtab = (char *)sechdrs[strindex].sh_addr;
60180
60181 /* Not a valid relocation section? */
60182 if (info >= hdr->e_shnum)
60183 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60184 * Do it before processing of module parameters, so the module
60185 * can provide parameter accessor functions of its own.
60186 */
60187 - if (mod->module_init)
60188 - flush_icache_range((unsigned long)mod->module_init,
60189 - (unsigned long)mod->module_init
60190 - + mod->init_size);
60191 - flush_icache_range((unsigned long)mod->module_core,
60192 - (unsigned long)mod->module_core + mod->core_size);
60193 + if (mod->module_init_rx)
60194 + flush_icache_range((unsigned long)mod->module_init_rx,
60195 + (unsigned long)mod->module_init_rx
60196 + + mod->init_size_rx);
60197 + flush_icache_range((unsigned long)mod->module_core_rx,
60198 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60199
60200 set_fs(old_fs);
60201
60202 - mod->args = args;
60203 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60204 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60205 mod->name);
60206 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60207 free_unload:
60208 module_unload_free(mod);
60209 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60210 + free_init_rx:
60211 percpu_modfree(mod->refptr);
60212 - free_init:
60213 #endif
60214 - module_free(mod, mod->module_init);
60215 - free_core:
60216 - module_free(mod, mod->module_core);
60217 + module_free_exec(mod, mod->module_init_rx);
60218 + free_core_rx:
60219 + module_free_exec(mod, mod->module_core_rx);
60220 + free_init_rw:
60221 + module_free(mod, mod->module_init_rw);
60222 + free_core_rw:
60223 + module_free(mod, mod->module_core_rw);
60224 /* mod will be freed with core. Don't access it beyond this line! */
60225 free_percpu:
60226 if (percpu)
60227 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60228 mod->symtab = mod->core_symtab;
60229 mod->strtab = mod->core_strtab;
60230 #endif
60231 - module_free(mod, mod->module_init);
60232 - mod->module_init = NULL;
60233 - mod->init_size = 0;
60234 - mod->init_text_size = 0;
60235 + module_free(mod, mod->module_init_rw);
60236 + module_free_exec(mod, mod->module_init_rx);
60237 + mod->module_init_rw = NULL;
60238 + mod->module_init_rx = NULL;
60239 + mod->init_size_rw = 0;
60240 + mod->init_size_rx = 0;
60241 mutex_unlock(&module_mutex);
60242
60243 return 0;
60244 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60245 unsigned long nextval;
60246
60247 /* At worse, next value is at end of module */
60248 - if (within_module_init(addr, mod))
60249 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60250 + if (within_module_init_rx(addr, mod))
60251 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60252 + else if (within_module_init_rw(addr, mod))
60253 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60254 + else if (within_module_core_rx(addr, mod))
60255 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60256 + else if (within_module_core_rw(addr, mod))
60257 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60258 else
60259 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60260 + return NULL;
60261
60262 /* Scan for closest preceeding symbol, and next symbol. (ELF
60263 starts real symbols at 1). */
60264 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60265 char buf[8];
60266
60267 seq_printf(m, "%s %u",
60268 - mod->name, mod->init_size + mod->core_size);
60269 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60270 print_unload_info(m, mod);
60271
60272 /* Informative for users. */
60273 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60274 mod->state == MODULE_STATE_COMING ? "Loading":
60275 "Live");
60276 /* Used by oprofile and other similar tools. */
60277 - seq_printf(m, " 0x%p", mod->module_core);
60278 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60279
60280 /* Taints info */
60281 if (mod->taints)
60282 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60283
60284 static int __init proc_modules_init(void)
60285 {
60286 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60287 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60288 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60289 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60290 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60291 +#else
60292 proc_create("modules", 0, NULL, &proc_modules_operations);
60293 +#endif
60294 +#else
60295 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60296 +#endif
60297 return 0;
60298 }
60299 module_init(proc_modules_init);
60300 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60301 {
60302 struct module *mod;
60303
60304 - if (addr < module_addr_min || addr > module_addr_max)
60305 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60306 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60307 return NULL;
60308
60309 list_for_each_entry_rcu(mod, &modules, list)
60310 - if (within_module_core(addr, mod)
60311 - || within_module_init(addr, mod))
60312 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60313 return mod;
60314 return NULL;
60315 }
60316 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60317 */
60318 struct module *__module_text_address(unsigned long addr)
60319 {
60320 - struct module *mod = __module_address(addr);
60321 + struct module *mod;
60322 +
60323 +#ifdef CONFIG_X86_32
60324 + addr = ktla_ktva(addr);
60325 +#endif
60326 +
60327 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60328 + return NULL;
60329 +
60330 + mod = __module_address(addr);
60331 +
60332 if (mod) {
60333 /* Make sure it's within the text section. */
60334 - if (!within(addr, mod->module_init, mod->init_text_size)
60335 - && !within(addr, mod->module_core, mod->core_text_size))
60336 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60337 mod = NULL;
60338 }
60339 return mod;
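Note on the kernel/module.c hunks above: they split each module's memory into a writable (RW) block and a read-only/executable (RX) block, allocate and bound-track the two separately, and route every ELF section to one block or the other based on its flags; writes into the RX block are bracketed by pax_open_kernel()/pax_close_kernel(). A minimal userspace sketch of the routing decision follows; it is illustrative only and not part of the patch (the SHF_* values come from the ELF specification).

    /*
     * Illustrative userspace sketch, not part of the patch: how a section's
     * ELF flags decide whether it lands in the module's writable (RW) or
     * read-only/executable (RX) mapping, mirroring the SHF_WRITE/SHF_ALLOC
     * test used in layout_sections() and in the SHF_ALLOC copy loop above.
     */
    #include <stdio.h>

    #define SHF_WRITE     0x1
    #define SHF_ALLOC     0x2
    #define SHF_EXECINSTR 0x4

    static const char *module_region(unsigned long sh_flags)
    {
            /* Writable or non-allocated sections go to the RW block;
             * everything else (code, read-only data) goes to the RX block. */
            if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
                    return "core_rw";
            return "core_rx";
    }

    int main(void)
    {
            printf(".text   -> %s\n", module_region(SHF_ALLOC | SHF_EXECINSTR));
            printf(".rodata -> %s\n", module_region(SHF_ALLOC));
            printf(".data   -> %s\n", module_region(SHF_ALLOC | SHF_WRITE));
            return 0;
    }

The same test is applied both when sizes are laid out and when sections are copied, which keeps the two passes consistent.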
60340 diff -urNp linux-2.6.32.41/kernel/mutex.c linux-2.6.32.41/kernel/mutex.c
60341 --- linux-2.6.32.41/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60342 +++ linux-2.6.32.41/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60343 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60344 */
60345
60346 for (;;) {
60347 - struct thread_info *owner;
60348 + struct task_struct *owner;
60349
60350 /*
60351 * If we own the BKL, then don't spin. The owner of
60352 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60353 spin_lock_mutex(&lock->wait_lock, flags);
60354
60355 debug_mutex_lock_common(lock, &waiter);
60356 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60357 + debug_mutex_add_waiter(lock, &waiter, task);
60358
60359 /* add waiting tasks to the end of the waitqueue (FIFO): */
60360 list_add_tail(&waiter.list, &lock->wait_list);
60361 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60362 * TASK_UNINTERRUPTIBLE case.)
60363 */
60364 if (unlikely(signal_pending_state(state, task))) {
60365 - mutex_remove_waiter(lock, &waiter,
60366 - task_thread_info(task));
60367 + mutex_remove_waiter(lock, &waiter, task);
60368 mutex_release(&lock->dep_map, 1, ip);
60369 spin_unlock_mutex(&lock->wait_lock, flags);
60370
60371 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60372 done:
60373 lock_acquired(&lock->dep_map, ip);
60374 /* got the lock - rejoice! */
60375 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60376 + mutex_remove_waiter(lock, &waiter, task);
60377 mutex_set_owner(lock);
60378
60379 /* set it to 0 if there are no waiters left: */
60380 diff -urNp linux-2.6.32.41/kernel/mutex-debug.c linux-2.6.32.41/kernel/mutex-debug.c
60381 --- linux-2.6.32.41/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60382 +++ linux-2.6.32.41/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60383 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60384 }
60385
60386 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60387 - struct thread_info *ti)
60388 + struct task_struct *task)
60389 {
60390 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60391
60392 /* Mark the current thread as blocked on the lock: */
60393 - ti->task->blocked_on = waiter;
60394 + task->blocked_on = waiter;
60395 }
60396
60397 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60398 - struct thread_info *ti)
60399 + struct task_struct *task)
60400 {
60401 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60402 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60403 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60404 - ti->task->blocked_on = NULL;
60405 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60406 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60407 + task->blocked_on = NULL;
60408
60409 list_del_init(&waiter->list);
60410 waiter->task = NULL;
60411 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60412 return;
60413
60414 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60415 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60416 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60417 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60418 mutex_clear_owner(lock);
60419 }
60420 diff -urNp linux-2.6.32.41/kernel/mutex-debug.h linux-2.6.32.41/kernel/mutex-debug.h
60421 --- linux-2.6.32.41/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60422 +++ linux-2.6.32.41/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60423 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60424 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60425 extern void debug_mutex_add_waiter(struct mutex *lock,
60426 struct mutex_waiter *waiter,
60427 - struct thread_info *ti);
60428 + struct task_struct *task);
60429 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60430 - struct thread_info *ti);
60431 + struct task_struct *task);
60432 extern void debug_mutex_unlock(struct mutex *lock);
60433 extern void debug_mutex_init(struct mutex *lock, const char *name,
60434 struct lock_class_key *key);
60435
60436 static inline void mutex_set_owner(struct mutex *lock)
60437 {
60438 - lock->owner = current_thread_info();
60439 + lock->owner = current;
60440 }
60441
60442 static inline void mutex_clear_owner(struct mutex *lock)
60443 diff -urNp linux-2.6.32.41/kernel/mutex.h linux-2.6.32.41/kernel/mutex.h
60444 --- linux-2.6.32.41/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60445 +++ linux-2.6.32.41/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60446 @@ -19,7 +19,7 @@
60447 #ifdef CONFIG_SMP
60448 static inline void mutex_set_owner(struct mutex *lock)
60449 {
60450 - lock->owner = current_thread_info();
60451 + lock->owner = current;
60452 }
60453
60454 static inline void mutex_clear_owner(struct mutex *lock)
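Note on the mutex changes above (kernel/mutex.c, mutex-debug.c, mutex-debug.h, mutex.h): lock-owner and blocked-waiter bookkeeping is recorded as a struct task_struct * instead of a struct thread_info *, dropping the ti->task indirection. A tiny userspace sketch of that bookkeeping shape, illustrative only and not from the patch:

    /*
     * Illustrative userspace sketch, not from the patch: tracking a lock's
     * owner with a direct task pointer rather than dereferencing a separate
     * thread_info structure.  Types and names are hypothetical stand-ins.
     */
    #include <assert.h>
    #include <stddef.h>

    struct task { int pid; };
    struct mutex_dbg { struct task *owner; };

    static void set_owner(struct mutex_dbg *m, struct task *t)
    {
            m->owner = t;                   /* lock->owner = current */
    }

    static void clear_owner(struct mutex_dbg *m, struct task *t)
    {
            assert(m->owner == t);          /* same check DEBUG_LOCKS_WARN_ON makes */
            m->owner = NULL;
    }

    int main(void)
    {
            struct task me = { .pid = 1 };
            struct mutex_dbg m = { .owner = NULL };

            set_owner(&m, &me);
            clear_owner(&m, &me);
            return 0;
    }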
60455 diff -urNp linux-2.6.32.41/kernel/panic.c linux-2.6.32.41/kernel/panic.c
60456 --- linux-2.6.32.41/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60457 +++ linux-2.6.32.41/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60458 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60459 const char *board;
60460
60461 printk(KERN_WARNING "------------[ cut here ]------------\n");
60462 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60463 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60464 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60465 if (board)
60466 printk(KERN_WARNING "Hardware name: %s\n", board);
60467 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60468 */
60469 void __stack_chk_fail(void)
60470 {
60471 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60472 + dump_stack();
60473 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60474 __builtin_return_address(0));
60475 }
60476 EXPORT_SYMBOL(__stack_chk_fail);
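Note on the kernel/panic.c hunks above: the WARN/panic call-site format switches from %pS to %pA (a grsecurity-specific pointer format), and __stack_chk_fail() now dumps the stack before panicking. For orientation, a userspace sketch of the general stack-canary pattern that __stack_chk_fail() backs; the canary value and reporting below are invented for demonstration and are not from the patch.

    /*
     * Illustrative userspace sketch, not from the patch: the shape of the
     * stack-canary scheme behind __stack_chk_fail().  The kernel reports the
     * corrupted caller via __builtin_return_address(0) and, with this patch,
     * dumps the stack before panicking.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static const unsigned long stack_canary = 0xdeadbeefUL; /* per-boot random in a real kernel */

    static void check_canary(unsigned long saved)
    {
            if (saved != stack_canary) {
                    fprintf(stderr, "stack corruption detected\n");
                    abort();        /* the kernel would dump_stack() and panic() */
            }
    }

    int main(void)
    {
            unsigned long saved = stack_canary; /* stored on the stack at function entry */
            /* ...function body runs; an overflow past a local buffer would clobber 'saved'... */
            check_canary(saved);                /* verified before returning */
            puts("canary intact");
            return 0;
    }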
60477 diff -urNp linux-2.6.32.41/kernel/params.c linux-2.6.32.41/kernel/params.c
60478 --- linux-2.6.32.41/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60479 +++ linux-2.6.32.41/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60480 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60481 return ret;
60482 }
60483
60484 -static struct sysfs_ops module_sysfs_ops = {
60485 +static const struct sysfs_ops module_sysfs_ops = {
60486 .show = module_attr_show,
60487 .store = module_attr_store,
60488 };
60489 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60490 return 0;
60491 }
60492
60493 -static struct kset_uevent_ops module_uevent_ops = {
60494 +static const struct kset_uevent_ops module_uevent_ops = {
60495 .filter = uevent_filter,
60496 };
60497
60498 diff -urNp linux-2.6.32.41/kernel/perf_event.c linux-2.6.32.41/kernel/perf_event.c
60499 --- linux-2.6.32.41/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60500 +++ linux-2.6.32.41/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60501 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60502 */
60503 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60504
60505 -static atomic64_t perf_event_id;
60506 +static atomic64_unchecked_t perf_event_id;
60507
60508 /*
60509 * Lock for (sysadmin-configurable) event reservations:
60510 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60511 * In order to keep per-task stats reliable we need to flip the event
60512 * values when we flip the contexts.
60513 */
60514 - value = atomic64_read(&next_event->count);
60515 - value = atomic64_xchg(&event->count, value);
60516 - atomic64_set(&next_event->count, value);
60517 + value = atomic64_read_unchecked(&next_event->count);
60518 + value = atomic64_xchg_unchecked(&event->count, value);
60519 + atomic64_set_unchecked(&next_event->count, value);
60520
60521 swap(event->total_time_enabled, next_event->total_time_enabled);
60522 swap(event->total_time_running, next_event->total_time_running);
60523 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60524 update_event_times(event);
60525 }
60526
60527 - return atomic64_read(&event->count);
60528 + return atomic64_read_unchecked(&event->count);
60529 }
60530
60531 /*
60532 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60533 values[n++] = 1 + leader->nr_siblings;
60534 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60535 values[n++] = leader->total_time_enabled +
60536 - atomic64_read(&leader->child_total_time_enabled);
60537 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60538 }
60539 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60540 values[n++] = leader->total_time_running +
60541 - atomic64_read(&leader->child_total_time_running);
60542 + atomic64_read_unchecked(&leader->child_total_time_running);
60543 }
60544
60545 size = n * sizeof(u64);
60546 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60547 values[n++] = perf_event_read_value(event);
60548 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60549 values[n++] = event->total_time_enabled +
60550 - atomic64_read(&event->child_total_time_enabled);
60551 + atomic64_read_unchecked(&event->child_total_time_enabled);
60552 }
60553 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60554 values[n++] = event->total_time_running +
60555 - atomic64_read(&event->child_total_time_running);
60556 + atomic64_read_unchecked(&event->child_total_time_running);
60557 }
60558 if (read_format & PERF_FORMAT_ID)
60559 values[n++] = primary_event_id(event);
60560 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60561 static void perf_event_reset(struct perf_event *event)
60562 {
60563 (void)perf_event_read(event);
60564 - atomic64_set(&event->count, 0);
60565 + atomic64_set_unchecked(&event->count, 0);
60566 perf_event_update_userpage(event);
60567 }
60568
60569 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60570 ++userpg->lock;
60571 barrier();
60572 userpg->index = perf_event_index(event);
60573 - userpg->offset = atomic64_read(&event->count);
60574 + userpg->offset = atomic64_read_unchecked(&event->count);
60575 if (event->state == PERF_EVENT_STATE_ACTIVE)
60576 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60577 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60578
60579 userpg->time_enabled = event->total_time_enabled +
60580 - atomic64_read(&event->child_total_time_enabled);
60581 + atomic64_read_unchecked(&event->child_total_time_enabled);
60582
60583 userpg->time_running = event->total_time_running +
60584 - atomic64_read(&event->child_total_time_running);
60585 + atomic64_read_unchecked(&event->child_total_time_running);
60586
60587 barrier();
60588 ++userpg->lock;
60589 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60590 u64 values[4];
60591 int n = 0;
60592
60593 - values[n++] = atomic64_read(&event->count);
60594 + values[n++] = atomic64_read_unchecked(&event->count);
60595 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60596 values[n++] = event->total_time_enabled +
60597 - atomic64_read(&event->child_total_time_enabled);
60598 + atomic64_read_unchecked(&event->child_total_time_enabled);
60599 }
60600 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60601 values[n++] = event->total_time_running +
60602 - atomic64_read(&event->child_total_time_running);
60603 + atomic64_read_unchecked(&event->child_total_time_running);
60604 }
60605 if (read_format & PERF_FORMAT_ID)
60606 values[n++] = primary_event_id(event);
60607 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60608 if (leader != event)
60609 leader->pmu->read(leader);
60610
60611 - values[n++] = atomic64_read(&leader->count);
60612 + values[n++] = atomic64_read_unchecked(&leader->count);
60613 if (read_format & PERF_FORMAT_ID)
60614 values[n++] = primary_event_id(leader);
60615
60616 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60617 if (sub != event)
60618 sub->pmu->read(sub);
60619
60620 - values[n++] = atomic64_read(&sub->count);
60621 + values[n++] = atomic64_read_unchecked(&sub->count);
60622 if (read_format & PERF_FORMAT_ID)
60623 values[n++] = primary_event_id(sub);
60624
60625 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60626 {
60627 struct hw_perf_event *hwc = &event->hw;
60628
60629 - atomic64_add(nr, &event->count);
60630 + atomic64_add_unchecked(nr, &event->count);
60631
60632 if (!hwc->sample_period)
60633 return;
60634 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60635 u64 now;
60636
60637 now = cpu_clock(cpu);
60638 - prev = atomic64_read(&event->hw.prev_count);
60639 - atomic64_set(&event->hw.prev_count, now);
60640 - atomic64_add(now - prev, &event->count);
60641 + prev = atomic64_read_unchecked(&event->hw.prev_count);
60642 + atomic64_set_unchecked(&event->hw.prev_count, now);
60643 + atomic64_add_unchecked(now - prev, &event->count);
60644 }
60645
60646 static int cpu_clock_perf_event_enable(struct perf_event *event)
60647 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60648 struct hw_perf_event *hwc = &event->hw;
60649 int cpu = raw_smp_processor_id();
60650
60651 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60652 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60653 perf_swevent_start_hrtimer(event);
60654
60655 return 0;
60656 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60657 u64 prev;
60658 s64 delta;
60659
60660 - prev = atomic64_xchg(&event->hw.prev_count, now);
60661 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60662 delta = now - prev;
60663 - atomic64_add(delta, &event->count);
60664 + atomic64_add_unchecked(delta, &event->count);
60665 }
60666
60667 static int task_clock_perf_event_enable(struct perf_event *event)
60668 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60669
60670 now = event->ctx->time;
60671
60672 - atomic64_set(&hwc->prev_count, now);
60673 + atomic64_set_unchecked(&hwc->prev_count, now);
60674
60675 perf_swevent_start_hrtimer(event);
60676
60677 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60678 event->parent = parent_event;
60679
60680 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60681 - event->id = atomic64_inc_return(&perf_event_id);
60682 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
60683
60684 event->state = PERF_EVENT_STATE_INACTIVE;
60685
60686 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60687 if (child_event->attr.inherit_stat)
60688 perf_event_read_event(child_event, child);
60689
60690 - child_val = atomic64_read(&child_event->count);
60691 + child_val = atomic64_read_unchecked(&child_event->count);
60692
60693 /*
60694 * Add back the child's count to the parent's count:
60695 */
60696 - atomic64_add(child_val, &parent_event->count);
60697 - atomic64_add(child_event->total_time_enabled,
60698 + atomic64_add_unchecked(child_val, &parent_event->count);
60699 + atomic64_add_unchecked(child_event->total_time_enabled,
60700 &parent_event->child_total_time_enabled);
60701 - atomic64_add(child_event->total_time_running,
60702 + atomic64_add_unchecked(child_event->total_time_running,
60703 &parent_event->child_total_time_running);
60704
60705 /*
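Note on the kernel/perf_event.c hunks above: event counters and accumulated times are converted from atomic64_t to atomic64_unchecked_t. Under PaX's overflow checking of atomic types, counters that are expected to grow or wrap must opt out through the _unchecked variants. A userspace sketch of the distinction, illustrative only; the function names are hypothetical, and __builtin_add_overflow is a GCC/Clang builtin.

    /*
     * Illustrative userspace sketch, not from the patch: a checked add
     * detects signed overflow instead of silently wrapping (what the
     * hardened kernel then does is outside this sketch); an unchecked add
     * is an ordinary wrapping counter, which is what perf statistics want.
     */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t add_checked(int64_t a, int64_t b)
    {
            int64_t r;

            if (__builtin_add_overflow(a, b, &r)) {
                    fprintf(stderr, "overflow detected\n");
                    return INT64_MAX;       /* this sketch just saturates */
            }
            return r;
    }

    static int64_t add_unchecked(int64_t a, int64_t b)
    {
            return (int64_t)((uint64_t)a + (uint64_t)b);    /* wraps silently */
    }

    int main(void)
    {
            printf("checked:   %lld\n", (long long)add_checked(INT64_MAX, 1));
            printf("unchecked: %lld\n", (long long)add_unchecked(INT64_MAX, 1));
            return 0;
    }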
60706 diff -urNp linux-2.6.32.41/kernel/pid.c linux-2.6.32.41/kernel/pid.c
60707 --- linux-2.6.32.41/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60708 +++ linux-2.6.32.41/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60709 @@ -33,6 +33,7 @@
60710 #include <linux/rculist.h>
60711 #include <linux/bootmem.h>
60712 #include <linux/hash.h>
60713 +#include <linux/security.h>
60714 #include <linux/pid_namespace.h>
60715 #include <linux/init_task.h>
60716 #include <linux/syscalls.h>
60717 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60718
60719 int pid_max = PID_MAX_DEFAULT;
60720
60721 -#define RESERVED_PIDS 300
60722 +#define RESERVED_PIDS 500
60723
60724 int pid_max_min = RESERVED_PIDS + 1;
60725 int pid_max_max = PID_MAX_LIMIT;
60726 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60727 */
60728 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60729 {
60730 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60731 + struct task_struct *task;
60732 +
60733 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60734 +
60735 + if (gr_pid_is_chrooted(task))
60736 + return NULL;
60737 +
60738 + return task;
60739 }
60740
60741 struct task_struct *find_task_by_vpid(pid_t vnr)
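Note on the kernel/pid.c hunks above: RESERVED_PIDS is raised from 300 to 500, and find_task_by_pid_ns() hides tasks that gr_pid_is_chrooted() says should not be visible to the caller. A userspace sketch of that look-up-then-filter pattern, illustrative only and with hypothetical names:

    /*
     * Illustrative userspace sketch, not from the patch: resolve the object
     * first, then hide it when a policy predicate (a stand-in for
     * gr_pid_is_chrooted()) denies visibility, so the caller behaves as if
     * the pid simply does not exist.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct task { int pid; int chrooted; };

    static int policy_hides(const struct task *t)
    {
            return t && t->chrooted;        /* hypothetical policy check */
    }

    static const struct task *find_task(const struct task *t)
    {
            if (policy_hides(t))
                    return NULL;
            return t;
    }

    int main(void)
    {
            struct task visible = { .pid = 42, .chrooted = 0 };
            struct task hidden  = { .pid = 43, .chrooted = 1 };

            printf("pid 42: %s\n", find_task(&visible) ? "found" : "hidden");
            printf("pid 43: %s\n", find_task(&hidden)  ? "found" : "hidden");
            return 0;
    }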
60742 diff -urNp linux-2.6.32.41/kernel/posix-cpu-timers.c linux-2.6.32.41/kernel/posix-cpu-timers.c
60743 --- linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60744 +++ linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60745 @@ -6,6 +6,7 @@
60746 #include <linux/posix-timers.h>
60747 #include <linux/errno.h>
60748 #include <linux/math64.h>
60749 +#include <linux/security.h>
60750 #include <asm/uaccess.h>
60751 #include <linux/kernel_stat.h>
60752 #include <trace/events/timer.h>
60753 diff -urNp linux-2.6.32.41/kernel/posix-timers.c linux-2.6.32.41/kernel/posix-timers.c
60754 --- linux-2.6.32.41/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60755 +++ linux-2.6.32.41/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60756 @@ -42,6 +42,7 @@
60757 #include <linux/compiler.h>
60758 #include <linux/idr.h>
60759 #include <linux/posix-timers.h>
60760 +#include <linux/grsecurity.h>
60761 #include <linux/syscalls.h>
60762 #include <linux/wait.h>
60763 #include <linux/workqueue.h>
60764 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60765 .nsleep = no_nsleep,
60766 };
60767
60768 + pax_track_stack();
60769 +
60770 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60771 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60772 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60773 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60774 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60775 return -EFAULT;
60776
60777 + /* only the CLOCK_REALTIME clock can be set, all other clocks
60778 + have their clock_set fptr set to a nosettime dummy function
60779 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
60780 + call common_clock_set, which calls do_sys_settimeofday, which
60781 + we hook
60782 + */
60783 +
60784 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60785 }
60786
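Note on the kernel/posix-timers.c comment added above: it records that only CLOCK_REALTIME is actually settable, because every other clock's clock_set points at a refusal stub, so hooking do_sys_settimeofday() covers all settable clocks. A userspace sketch of that dispatch shape, illustrative only and with hypothetical names:

    /*
     * Illustrative userspace sketch, not from the patch: a per-clock ops
     * table where only the realtime clock carries a real setter; the rest
     * share a stub that refuses, so guarding the one real path is enough.
     */
    #include <errno.h>
    #include <stdio.h>

    struct k_clock { int (*clock_set)(long ns); };

    static int nosettime(long ns)
    {
            (void)ns;
            return -EPERM;                  /* refusal stub */
    }

    static int settime_realtime(long ns)
    {
            printf("set realtime clock to %ld\n", ns);
            return 0;                       /* the single hookable setter */
    }

    int main(void)
    {
            struct k_clock monotonic = { .clock_set = nosettime };
            struct k_clock realtime  = { .clock_set = settime_realtime };

            printf("monotonic: %d\n", monotonic.clock_set(123));    /* -EPERM */
            printf("realtime:  %d\n", realtime.clock_set(123));     /* allowed */
            return 0;
    }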
60787 diff -urNp linux-2.6.32.41/kernel/power/hibernate.c linux-2.6.32.41/kernel/power/hibernate.c
60788 --- linux-2.6.32.41/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
60789 +++ linux-2.6.32.41/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
60790 @@ -48,14 +48,14 @@ enum {
60791
60792 static int hibernation_mode = HIBERNATION_SHUTDOWN;
60793
60794 -static struct platform_hibernation_ops *hibernation_ops;
60795 +static const struct platform_hibernation_ops *hibernation_ops;
60796
60797 /**
60798 * hibernation_set_ops - set the global hibernate operations
60799 * @ops: the hibernation operations to use in subsequent hibernation transitions
60800 */
60801
60802 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
60803 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
60804 {
60805 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
60806 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
60807 diff -urNp linux-2.6.32.41/kernel/power/poweroff.c linux-2.6.32.41/kernel/power/poweroff.c
60808 --- linux-2.6.32.41/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
60809 +++ linux-2.6.32.41/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
60810 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
60811 .enable_mask = SYSRQ_ENABLE_BOOT,
60812 };
60813
60814 -static int pm_sysrq_init(void)
60815 +static int __init pm_sysrq_init(void)
60816 {
60817 register_sysrq_key('o', &sysrq_poweroff_op);
60818 return 0;
60819 diff -urNp linux-2.6.32.41/kernel/power/process.c linux-2.6.32.41/kernel/power/process.c
60820 --- linux-2.6.32.41/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
60821 +++ linux-2.6.32.41/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
60822 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
60823 struct timeval start, end;
60824 u64 elapsed_csecs64;
60825 unsigned int elapsed_csecs;
60826 + bool timedout = false;
60827
60828 do_gettimeofday(&start);
60829
60830 end_time = jiffies + TIMEOUT;
60831 do {
60832 todo = 0;
60833 + if (time_after(jiffies, end_time))
60834 + timedout = true;
60835 read_lock(&tasklist_lock);
60836 do_each_thread(g, p) {
60837 if (frozen(p) || !freezeable(p))
60838 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
60839 * It is "frozen enough". If the task does wake
60840 * up, it will immediately call try_to_freeze.
60841 */
60842 - if (!task_is_stopped_or_traced(p) &&
60843 - !freezer_should_skip(p))
60844 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
60845 todo++;
60846 + if (timedout) {
60847 + printk(KERN_ERR "Task refusing to freeze:\n");
60848 + sched_show_task(p);
60849 + }
60850 + }
60851 } while_each_thread(g, p);
60852 read_unlock(&tasklist_lock);
60853 yield(); /* Yield is okay here */
60854 - if (time_after(jiffies, end_time))
60855 - break;
60856 - } while (todo);
60857 + } while (todo && !timedout);
60858
60859 do_gettimeofday(&end);
60860 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
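Note on the kernel/power/process.c rework above: the loop records the timeout first and then runs one final pass so that every task still refusing to freeze is reported (via sched_show_task()) before try_to_freeze_tasks() gives up, instead of breaking out silently. A userspace sketch of that loop shape, illustrative only; the timing budget and task count are arbitrary.

    /*
     * Illustrative userspace sketch, not from the patch: note the timeout,
     * keep iterating once more to report the offenders, then stop.
     */
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            time_t end_time = time(NULL) + 2;       /* 2-second budget, arbitrary */
            const int stubborn = 3;                 /* pretend 3 tasks never freeze */
            int todo, timedout = 0;

            do {
                    todo = 0;
                    if (time(NULL) > end_time)
                            timedout = 1;
                    for (int i = 0; i < stubborn; i++) {
                            todo++;                 /* still not frozen */
                            if (timedout)
                                    printf("task %d refusing to freeze\n", i);
                    }
                    usleep(100 * 1000);             /* stand-in for yield() */
            } while (todo && !timedout);

            printf("gave up with %d task(s) unfrozen\n", todo);
            return 0;
    }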
60861 diff -urNp linux-2.6.32.41/kernel/power/suspend.c linux-2.6.32.41/kernel/power/suspend.c
60862 --- linux-2.6.32.41/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
60863 +++ linux-2.6.32.41/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
60864 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
60865 [PM_SUSPEND_MEM] = "mem",
60866 };
60867
60868 -static struct platform_suspend_ops *suspend_ops;
60869 +static const struct platform_suspend_ops *suspend_ops;
60870
60871 /**
60872 * suspend_set_ops - Set the global suspend method table.
60873 * @ops: Pointer to ops structure.
60874 */
60875 -void suspend_set_ops(struct platform_suspend_ops *ops)
60876 +void suspend_set_ops(const struct platform_suspend_ops *ops)
60877 {
60878 mutex_lock(&pm_mutex);
60879 suspend_ops = ops;
60880 diff -urNp linux-2.6.32.41/kernel/printk.c linux-2.6.32.41/kernel/printk.c
60881 --- linux-2.6.32.41/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
60882 +++ linux-2.6.32.41/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
60883 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
60884 char c;
60885 int error = 0;
60886
60887 +#ifdef CONFIG_GRKERNSEC_DMESG
60888 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
60889 + return -EPERM;
60890 +#endif
60891 +
60892 error = security_syslog(type);
60893 if (error)
60894 return error;
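Note on the kernel/printk.c hunk above: with GRKERNSEC_DMESG enabled, do_syslog() returns -EPERM to callers lacking CAP_SYS_ADMIN before any other processing. A userspace sketch of that early gate, illustrative only; has_cap_sys_admin() is a hypothetical stand-in for capable(CAP_SYS_ADMIN).

    /*
     * Illustrative userspace sketch, not from the patch: deny unprivileged
     * access up front, before touching the log buffer at all.
     */
    #include <errno.h>
    #include <stdio.h>

    static int has_cap_sys_admin(void)
    {
            return 0;                       /* pretend the caller is unprivileged */
    }

    static int do_syslog_gate(int restrict_dmesg)
    {
            if (restrict_dmesg && !has_cap_sys_admin())
                    return -EPERM;          /* refused before any other work */
            return 0;
    }

    int main(void)
    {
            printf("unprivileged read: %d\n", do_syslog_gate(1));   /* -EPERM */
            printf("restriction off:   %d\n", do_syslog_gate(0));   /* allowed */
            return 0;
    }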
60895 diff -urNp linux-2.6.32.41/kernel/profile.c linux-2.6.32.41/kernel/profile.c
60896 --- linux-2.6.32.41/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
60897 +++ linux-2.6.32.41/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
60898 @@ -39,7 +39,7 @@ struct profile_hit {
60899 /* Oprofile timer tick hook */
60900 static int (*timer_hook)(struct pt_regs *) __read_mostly;
60901
60902 -static atomic_t *prof_buffer;
60903 +static atomic_unchecked_t *prof_buffer;
60904 static unsigned long prof_len, prof_shift;
60905
60906 int prof_on __read_mostly;
60907 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
60908 hits[i].pc = 0;
60909 continue;
60910 }
60911 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60912 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60913 hits[i].hits = hits[i].pc = 0;
60914 }
60915 }
60916 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
60917 * Add the current hit(s) and flush the write-queue out
60918 * to the global buffer:
60919 */
60920 - atomic_add(nr_hits, &prof_buffer[pc]);
60921 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
60922 for (i = 0; i < NR_PROFILE_HIT; ++i) {
60923 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60924 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60925 hits[i].pc = hits[i].hits = 0;
60926 }
60927 out:
60928 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
60929 if (prof_on != type || !prof_buffer)
60930 return;
60931 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
60932 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60933 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60934 }
60935 #endif /* !CONFIG_SMP */
60936 EXPORT_SYMBOL_GPL(profile_hits);
60937 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
60938 return -EFAULT;
60939 buf++; p++; count--; read++;
60940 }
60941 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
60942 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
60943 if (copy_to_user(buf, (void *)pnt, count))
60944 return -EFAULT;
60945 read += count;
60946 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
60947 }
60948 #endif
60949 profile_discard_flip_buffers();
60950 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
60951 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
60952 return count;
60953 }
60954
60955 diff -urNp linux-2.6.32.41/kernel/ptrace.c linux-2.6.32.41/kernel/ptrace.c
60956 --- linux-2.6.32.41/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
60957 +++ linux-2.6.32.41/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
60958 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
60959 return ret;
60960 }
60961
60962 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
60963 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
60964 + unsigned int log)
60965 {
60966 const struct cred *cred = current_cred(), *tcred;
60967
60968 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
60969 cred->gid != tcred->egid ||
60970 cred->gid != tcred->sgid ||
60971 cred->gid != tcred->gid) &&
60972 - !capable(CAP_SYS_PTRACE)) {
60973 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
60974 + (log && !capable(CAP_SYS_PTRACE)))
60975 + ) {
60976 rcu_read_unlock();
60977 return -EPERM;
60978 }
60979 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
60980 smp_rmb();
60981 if (task->mm)
60982 dumpable = get_dumpable(task->mm);
60983 - if (!dumpable && !capable(CAP_SYS_PTRACE))
60984 + if (!dumpable &&
60985 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
60986 + (log && !capable(CAP_SYS_PTRACE))))
60987 return -EPERM;
60988
60989 return security_ptrace_access_check(task, mode);
60990 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
60991 {
60992 int err;
60993 task_lock(task);
60994 - err = __ptrace_may_access(task, mode);
60995 + err = __ptrace_may_access(task, mode, 0);
60996 + task_unlock(task);
60997 + return !err;
60998 +}
60999 +
61000 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61001 +{
61002 + int err;
61003 + task_lock(task);
61004 + err = __ptrace_may_access(task, mode, 1);
61005 task_unlock(task);
61006 return !err;
61007 }
61008 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61009 goto out;
61010
61011 task_lock(task);
61012 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61013 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61014 task_unlock(task);
61015 if (retval)
61016 goto unlock_creds;
61017 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61018 goto unlock_tasklist;
61019
61020 task->ptrace = PT_PTRACED;
61021 - if (capable(CAP_SYS_PTRACE))
61022 + if (capable_nolog(CAP_SYS_PTRACE))
61023 task->ptrace |= PT_PTRACE_CAP;
61024
61025 __ptrace_link(task, current);
61026 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61027 {
61028 int copied = 0;
61029
61030 + pax_track_stack();
61031 +
61032 while (len > 0) {
61033 char buf[128];
61034 int this_len, retval;
61035 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61036 {
61037 int copied = 0;
61038
61039 + pax_track_stack();
61040 +
61041 while (len > 0) {
61042 char buf[128];
61043 int this_len, retval;
61044 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61045 int ret = -EIO;
61046 siginfo_t siginfo;
61047
61048 + pax_track_stack();
61049 +
61050 switch (request) {
61051 case PTRACE_PEEKTEXT:
61052 case PTRACE_PEEKDATA:
61053 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61054 ret = ptrace_setoptions(child, data);
61055 break;
61056 case PTRACE_GETEVENTMSG:
61057 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61058 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61059 break;
61060
61061 case PTRACE_GETSIGINFO:
61062 ret = ptrace_getsiginfo(child, &siginfo);
61063 if (!ret)
61064 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61065 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61066 &siginfo);
61067 break;
61068
61069 case PTRACE_SETSIGINFO:
61070 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61071 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61072 sizeof siginfo))
61073 ret = -EFAULT;
61074 else
61075 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61076 goto out;
61077 }
61078
61079 + if (gr_handle_ptrace(child, request)) {
61080 + ret = -EPERM;
61081 + goto out_put_task_struct;
61082 + }
61083 +
61084 if (request == PTRACE_ATTACH) {
61085 ret = ptrace_attach(child);
61086 /*
61087 * Some architectures need to do book-keeping after
61088 * a ptrace attach.
61089 */
61090 - if (!ret)
61091 + if (!ret) {
61092 arch_ptrace_attach(child);
61093 + gr_audit_ptrace(child);
61094 + }
61095 goto out_put_task_struct;
61096 }
61097
61098 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61099 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61100 if (copied != sizeof(tmp))
61101 return -EIO;
61102 - return put_user(tmp, (unsigned long __user *)data);
61103 + return put_user(tmp, (__force unsigned long __user *)data);
61104 }
61105
61106 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61107 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61108 siginfo_t siginfo;
61109 int ret;
61110
61111 + pax_track_stack();
61112 +
61113 switch (request) {
61114 case PTRACE_PEEKTEXT:
61115 case PTRACE_PEEKDATA:
61116 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61117 goto out;
61118 }
61119
61120 + if (gr_handle_ptrace(child, request)) {
61121 + ret = -EPERM;
61122 + goto out_put_task_struct;
61123 + }
61124 +
61125 if (request == PTRACE_ATTACH) {
61126 ret = ptrace_attach(child);
61127 /*
61128 * Some architectures need to do book-keeping after
61129 * a ptrace attach.
61130 */
61131 - if (!ret)
61132 + if (!ret) {
61133 arch_ptrace_attach(child);
61134 + gr_audit_ptrace(child);
61135 + }
61136 goto out_put_task_struct;
61137 }
61138
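Note on the kernel/ptrace.c changes above: __ptrace_may_access() grows a log parameter so internal probes use capable_nolog() while real attach requests use the auditing capable(), and gr_handle_ptrace()/gr_audit_ptrace() hooks are added around attach. A userspace sketch of the shared-check-with-optional-audit pattern, illustrative only and with hypothetical names:

    /*
     * Illustrative userspace sketch, not from the patch: one permission
     * check serving two entry points, only one of which is allowed to emit
     * an audit message on denial.
     */
    #include <stdio.h>

    static int capable_quiet(void)
    {
            return 0;                       /* denied, no log side effect */
    }

    static int capable_logged(void)
    {
            puts("audit: capability denied");
            return 0;                       /* denied, and recorded */
    }

    static int may_access(int log)
    {
            if (log ? capable_logged() : capable_quiet())
                    return 0;
            return -1;                      /* -EPERM in the kernel */
    }

    int main(void)
    {
            printf("internal probe: %d\n", may_access(0));  /* silent denial */
            printf("attach request: %d\n", may_access(1));  /* audited denial */
            return 0;
    }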
61139 diff -urNp linux-2.6.32.41/kernel/rcutorture.c linux-2.6.32.41/kernel/rcutorture.c
61140 --- linux-2.6.32.41/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61141 +++ linux-2.6.32.41/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61142 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61143 { 0 };
61144 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61145 { 0 };
61146 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61147 -static atomic_t n_rcu_torture_alloc;
61148 -static atomic_t n_rcu_torture_alloc_fail;
61149 -static atomic_t n_rcu_torture_free;
61150 -static atomic_t n_rcu_torture_mberror;
61151 -static atomic_t n_rcu_torture_error;
61152 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61153 +static atomic_unchecked_t n_rcu_torture_alloc;
61154 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61155 +static atomic_unchecked_t n_rcu_torture_free;
61156 +static atomic_unchecked_t n_rcu_torture_mberror;
61157 +static atomic_unchecked_t n_rcu_torture_error;
61158 static long n_rcu_torture_timers;
61159 static struct list_head rcu_torture_removed;
61160 static cpumask_var_t shuffle_tmp_mask;
61161 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61162
61163 spin_lock_bh(&rcu_torture_lock);
61164 if (list_empty(&rcu_torture_freelist)) {
61165 - atomic_inc(&n_rcu_torture_alloc_fail);
61166 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61167 spin_unlock_bh(&rcu_torture_lock);
61168 return NULL;
61169 }
61170 - atomic_inc(&n_rcu_torture_alloc);
61171 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61172 p = rcu_torture_freelist.next;
61173 list_del_init(p);
61174 spin_unlock_bh(&rcu_torture_lock);
61175 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61176 static void
61177 rcu_torture_free(struct rcu_torture *p)
61178 {
61179 - atomic_inc(&n_rcu_torture_free);
61180 + atomic_inc_unchecked(&n_rcu_torture_free);
61181 spin_lock_bh(&rcu_torture_lock);
61182 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61183 spin_unlock_bh(&rcu_torture_lock);
61184 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61185 i = rp->rtort_pipe_count;
61186 if (i > RCU_TORTURE_PIPE_LEN)
61187 i = RCU_TORTURE_PIPE_LEN;
61188 - atomic_inc(&rcu_torture_wcount[i]);
61189 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61190 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61191 rp->rtort_mbtest = 0;
61192 rcu_torture_free(rp);
61193 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61194 i = rp->rtort_pipe_count;
61195 if (i > RCU_TORTURE_PIPE_LEN)
61196 i = RCU_TORTURE_PIPE_LEN;
61197 - atomic_inc(&rcu_torture_wcount[i]);
61198 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61199 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61200 rp->rtort_mbtest = 0;
61201 list_del(&rp->rtort_free);
61202 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61203 i = old_rp->rtort_pipe_count;
61204 if (i > RCU_TORTURE_PIPE_LEN)
61205 i = RCU_TORTURE_PIPE_LEN;
61206 - atomic_inc(&rcu_torture_wcount[i]);
61207 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61208 old_rp->rtort_pipe_count++;
61209 cur_ops->deferred_free(old_rp);
61210 }
61211 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61212 return;
61213 }
61214 if (p->rtort_mbtest == 0)
61215 - atomic_inc(&n_rcu_torture_mberror);
61216 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61217 spin_lock(&rand_lock);
61218 cur_ops->read_delay(&rand);
61219 n_rcu_torture_timers++;
61220 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61221 continue;
61222 }
61223 if (p->rtort_mbtest == 0)
61224 - atomic_inc(&n_rcu_torture_mberror);
61225 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61226 cur_ops->read_delay(&rand);
61227 preempt_disable();
61228 pipe_count = p->rtort_pipe_count;
61229 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61230 rcu_torture_current,
61231 rcu_torture_current_version,
61232 list_empty(&rcu_torture_freelist),
61233 - atomic_read(&n_rcu_torture_alloc),
61234 - atomic_read(&n_rcu_torture_alloc_fail),
61235 - atomic_read(&n_rcu_torture_free),
61236 - atomic_read(&n_rcu_torture_mberror),
61237 + atomic_read_unchecked(&n_rcu_torture_alloc),
61238 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61239 + atomic_read_unchecked(&n_rcu_torture_free),
61240 + atomic_read_unchecked(&n_rcu_torture_mberror),
61241 n_rcu_torture_timers);
61242 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61243 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61244 cnt += sprintf(&page[cnt], " !!!");
61245 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61246 if (i > 1) {
61247 cnt += sprintf(&page[cnt], "!!! ");
61248 - atomic_inc(&n_rcu_torture_error);
61249 + atomic_inc_unchecked(&n_rcu_torture_error);
61250 WARN_ON_ONCE(1);
61251 }
61252 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61253 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61254 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61255 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61256 cnt += sprintf(&page[cnt], " %d",
61257 - atomic_read(&rcu_torture_wcount[i]));
61258 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61259 }
61260 cnt += sprintf(&page[cnt], "\n");
61261 if (cur_ops->stats)
61262 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61263
61264 if (cur_ops->cleanup)
61265 cur_ops->cleanup();
61266 - if (atomic_read(&n_rcu_torture_error))
61267 + if (atomic_read_unchecked(&n_rcu_torture_error))
61268 rcu_torture_print_module_parms("End of test: FAILURE");
61269 else
61270 rcu_torture_print_module_parms("End of test: SUCCESS");
61271 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61272
61273 rcu_torture_current = NULL;
61274 rcu_torture_current_version = 0;
61275 - atomic_set(&n_rcu_torture_alloc, 0);
61276 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61277 - atomic_set(&n_rcu_torture_free, 0);
61278 - atomic_set(&n_rcu_torture_mberror, 0);
61279 - atomic_set(&n_rcu_torture_error, 0);
61280 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61281 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61282 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61283 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61284 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61285 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61286 - atomic_set(&rcu_torture_wcount[i], 0);
61287 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61288 for_each_possible_cpu(cpu) {
61289 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61290 per_cpu(rcu_torture_count, cpu)[i] = 0;
61291 diff -urNp linux-2.6.32.41/kernel/rcutree.c linux-2.6.32.41/kernel/rcutree.c
61292 --- linux-2.6.32.41/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61293 +++ linux-2.6.32.41/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61294 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61295 /*
61296 * Do softirq processing for the current CPU.
61297 */
61298 -static void rcu_process_callbacks(struct softirq_action *unused)
61299 +static void rcu_process_callbacks(void)
61300 {
61301 /*
61302 * Memory references from any prior RCU read-side critical sections
61303 diff -urNp linux-2.6.32.41/kernel/rcutree_plugin.h linux-2.6.32.41/kernel/rcutree_plugin.h
61304 --- linux-2.6.32.41/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61305 +++ linux-2.6.32.41/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61306 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61307 */
61308 void __rcu_read_lock(void)
61309 {
61310 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61311 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61312 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61313 }
61314 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61315 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61316 struct task_struct *t = current;
61317
61318 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61319 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61320 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61321 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61322 rcu_read_unlock_special(t);
61323 }
61324 diff -urNp linux-2.6.32.41/kernel/relay.c linux-2.6.32.41/kernel/relay.c
61325 --- linux-2.6.32.41/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61326 +++ linux-2.6.32.41/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61327 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61328 unsigned int flags,
61329 int *nonpad_ret)
61330 {
61331 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61332 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61333 struct rchan_buf *rbuf = in->private_data;
61334 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61335 uint64_t pos = (uint64_t) *ppos;
61336 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61337 .ops = &relay_pipe_buf_ops,
61338 .spd_release = relay_page_release,
61339 };
61340 + ssize_t ret;
61341 +
61342 + pax_track_stack();
61343
61344 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61345 return 0;
61346 diff -urNp linux-2.6.32.41/kernel/resource.c linux-2.6.32.41/kernel/resource.c
61347 --- linux-2.6.32.41/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61348 +++ linux-2.6.32.41/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61349 @@ -132,8 +132,18 @@ static const struct file_operations proc
61350
61351 static int __init ioresources_init(void)
61352 {
61353 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61354 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61355 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61356 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61357 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61358 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61359 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61360 +#endif
61361 +#else
61362 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61363 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61364 +#endif
61365 return 0;
61366 }
61367 __initcall(ioresources_init);
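Note on the kernel/resource.c hunk above: /proc/ioports and /proc/iomem are created with restricted modes, or not created at all, depending on the GRKERNSEC_PROC_* options. A userspace sketch of how the mode collapses per configuration, illustrative only; the 0444 shown for the stock case stands in for procfs's default when a mode of 0 is passed.

    /*
     * Illustrative userspace sketch, not from the patch: the effective
     * /proc file mode per configuration choice.
     */
    #include <stdio.h>
    #include <sys/stat.h>

    static mode_t proc_mode(int proc_add, int proc_user, int proc_usergroup)
    {
            if (!proc_add)
                    return 0444;                    /* stock kernel: world readable */
            if (proc_user)
                    return S_IRUSR;                 /* 0400: root only */
            if (proc_usergroup)
                    return S_IRUSR | S_IRGRP;       /* 0440: root + configured group */
            return 0;                               /* PROC_ADD alone: not created */
    }

    int main(void)
    {
            printf("stock:          %04o\n", (unsigned int)proc_mode(0, 0, 0));
            printf("PROC_USER:      %04o\n", (unsigned int)proc_mode(1, 1, 0));
            printf("PROC_USERGROUP: %04o\n", (unsigned int)proc_mode(1, 0, 1));
            printf("PROC_ADD only:  %04o\n", (unsigned int)proc_mode(1, 0, 0));
            return 0;
    }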
61368 diff -urNp linux-2.6.32.41/kernel/rtmutex.c linux-2.6.32.41/kernel/rtmutex.c
61369 --- linux-2.6.32.41/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61370 +++ linux-2.6.32.41/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61371 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61372 */
61373 spin_lock_irqsave(&pendowner->pi_lock, flags);
61374
61375 - WARN_ON(!pendowner->pi_blocked_on);
61376 + BUG_ON(!pendowner->pi_blocked_on);
61377 WARN_ON(pendowner->pi_blocked_on != waiter);
61378 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61379
61380 diff -urNp linux-2.6.32.41/kernel/rtmutex-tester.c linux-2.6.32.41/kernel/rtmutex-tester.c
61381 --- linux-2.6.32.41/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61382 +++ linux-2.6.32.41/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61383 @@ -21,7 +21,7 @@
61384 #define MAX_RT_TEST_MUTEXES 8
61385
61386 static spinlock_t rttest_lock;
61387 -static atomic_t rttest_event;
61388 +static atomic_unchecked_t rttest_event;
61389
61390 struct test_thread_data {
61391 int opcode;
61392 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61393
61394 case RTTEST_LOCKCONT:
61395 td->mutexes[td->opdata] = 1;
61396 - td->event = atomic_add_return(1, &rttest_event);
61397 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61398 return 0;
61399
61400 case RTTEST_RESET:
61401 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61402 return 0;
61403
61404 case RTTEST_RESETEVENT:
61405 - atomic_set(&rttest_event, 0);
61406 + atomic_set_unchecked(&rttest_event, 0);
61407 return 0;
61408
61409 default:
61410 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61411 return ret;
61412
61413 td->mutexes[id] = 1;
61414 - td->event = atomic_add_return(1, &rttest_event);
61415 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61416 rt_mutex_lock(&mutexes[id]);
61417 - td->event = atomic_add_return(1, &rttest_event);
61418 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61419 td->mutexes[id] = 4;
61420 return 0;
61421
61422 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61423 return ret;
61424
61425 td->mutexes[id] = 1;
61426 - td->event = atomic_add_return(1, &rttest_event);
61427 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61428 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61429 - td->event = atomic_add_return(1, &rttest_event);
61430 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61431 td->mutexes[id] = ret ? 0 : 4;
61432 return ret ? -EINTR : 0;
61433
61434 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61435 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61436 return ret;
61437
61438 - td->event = atomic_add_return(1, &rttest_event);
61439 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61440 rt_mutex_unlock(&mutexes[id]);
61441 - td->event = atomic_add_return(1, &rttest_event);
61442 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61443 td->mutexes[id] = 0;
61444 return 0;
61445
61446 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61447 break;
61448
61449 td->mutexes[dat] = 2;
61450 - td->event = atomic_add_return(1, &rttest_event);
61451 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61452 break;
61453
61454 case RTTEST_LOCKBKL:
61455 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61456 return;
61457
61458 td->mutexes[dat] = 3;
61459 - td->event = atomic_add_return(1, &rttest_event);
61460 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61461 break;
61462
61463 case RTTEST_LOCKNOWAIT:
61464 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61465 return;
61466
61467 td->mutexes[dat] = 1;
61468 - td->event = atomic_add_return(1, &rttest_event);
61469 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61470 return;
61471
61472 case RTTEST_LOCKBKL:
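
Note: the rtmutex-tester hunk is a representative atomic_unchecked_t conversion. Under PAX_REFCOUNT, plain atomic_t arithmetic traps on overflow, so counters whose wraparound is harmless (here a test-event sequence number) are moved to the *_unchecked variants. The userspace model below only shows the intended semantics with GCC builtins; it is not the kernel type:

    #include <stdio.h>

    typedef struct { volatile int counter; } atomic_unchecked_t;

    static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
        /* no overflow detection on purpose: wraparound is acceptable here */
        return __sync_add_and_fetch(&v->counter, i);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
        v->counter = i;
    }

    int main(void)
    {
        atomic_unchecked_t rttest_event;

        atomic_set_unchecked(&rttest_event, 0);
        for (int op = 0; op < 3; op++)
            printf("event %d\n", atomic_add_return_unchecked(1, &rttest_event));
        return 0;
    }
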
61473 diff -urNp linux-2.6.32.41/kernel/sched.c linux-2.6.32.41/kernel/sched.c
61474 --- linux-2.6.32.41/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61475 +++ linux-2.6.32.41/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61476 @@ -5043,7 +5043,7 @@ out:
61477 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61478 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61479 */
61480 -static void run_rebalance_domains(struct softirq_action *h)
61481 +static void run_rebalance_domains(void)
61482 {
61483 int this_cpu = smp_processor_id();
61484 struct rq *this_rq = cpu_rq(this_cpu);
61485 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61486 struct rq *rq;
61487 int cpu;
61488
61489 + pax_track_stack();
61490 +
61491 need_resched:
61492 preempt_disable();
61493 cpu = smp_processor_id();
61494 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61495 * Look out! "owner" is an entirely speculative pointer
61496 * access and not reliable.
61497 */
61498 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61499 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61500 {
61501 unsigned int cpu;
61502 struct rq *rq;
61503 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61504 * DEBUG_PAGEALLOC could have unmapped it if
61505 * the mutex owner just released it and exited.
61506 */
61507 - if (probe_kernel_address(&owner->cpu, cpu))
61508 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61509 return 0;
61510 #else
61511 - cpu = owner->cpu;
61512 + cpu = task_thread_info(owner)->cpu;
61513 #endif
61514
61515 /*
61516 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61517 /*
61518 * Is that owner really running on that cpu?
61519 */
61520 - if (task_thread_info(rq->curr) != owner || need_resched())
61521 + if (rq->curr != owner || need_resched())
61522 return 0;
61523
61524 cpu_relax();
61525 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61526 /* convert nice value [19,-20] to rlimit style value [1,40] */
61527 int nice_rlim = 20 - nice;
61528
61529 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61530 +
61531 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61532 capable(CAP_SYS_NICE));
61533 }
61534 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61535 if (nice > 19)
61536 nice = 19;
61537
61538 - if (increment < 0 && !can_nice(current, nice))
61539 + if (increment < 0 && (!can_nice(current, nice) ||
61540 + gr_handle_chroot_nice()))
61541 return -EPERM;
61542
61543 retval = security_task_setnice(current, nice);
61544 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61545 long power;
61546 int weight;
61547
61548 - WARN_ON(!sd || !sd->groups);
61549 + BUG_ON(!sd || !sd->groups);
61550
61551 if (cpu != group_first_cpu(sd->groups))
61552 return;
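
Note: the can_nice() hunk feeds the requested nice level into gr_learn_resource() after converting it to RLIMIT_NICE units, using the mapping already described in the surrounding comment (nice 19..-20 maps to 1..40). A tiny sketch of just that conversion:

    #include <stdio.h>

    static int nice_to_rlim(int nice)
    {
        return 20 - nice;    /* nice 19 -> 1, nice 0 -> 20, nice -20 -> 40 */
    }

    int main(void)
    {
        int samples[] = { 19, 0, -20 };
        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("nice %3d -> RLIMIT_NICE units %2d\n",
                   samples[i], nice_to_rlim(samples[i]));
        return 0;
    }
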
61553 diff -urNp linux-2.6.32.41/kernel/signal.c linux-2.6.32.41/kernel/signal.c
61554 --- linux-2.6.32.41/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61555 +++ linux-2.6.32.41/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61556 @@ -41,12 +41,12 @@
61557
61558 static struct kmem_cache *sigqueue_cachep;
61559
61560 -static void __user *sig_handler(struct task_struct *t, int sig)
61561 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61562 {
61563 return t->sighand->action[sig - 1].sa.sa_handler;
61564 }
61565
61566 -static int sig_handler_ignored(void __user *handler, int sig)
61567 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61568 {
61569 /* Is it explicitly or implicitly ignored? */
61570 return handler == SIG_IGN ||
61571 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61572 static int sig_task_ignored(struct task_struct *t, int sig,
61573 int from_ancestor_ns)
61574 {
61575 - void __user *handler;
61576 + __sighandler_t handler;
61577
61578 handler = sig_handler(t, sig);
61579
61580 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61581 */
61582 user = get_uid(__task_cred(t)->user);
61583 atomic_inc(&user->sigpending);
61584 +
61585 + if (!override_rlimit)
61586 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61587 if (override_rlimit ||
61588 atomic_read(&user->sigpending) <=
61589 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61590 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61591
61592 int unhandled_signal(struct task_struct *tsk, int sig)
61593 {
61594 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61595 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61596 if (is_global_init(tsk))
61597 return 1;
61598 if (handler != SIG_IGN && handler != SIG_DFL)
61599 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61600 }
61601 }
61602
61603 + if (gr_handle_signal(t, sig))
61604 + return -EPERM;
61605 +
61606 return security_task_kill(t, info, sig, 0);
61607 }
61608
61609 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61610 return send_signal(sig, info, p, 1);
61611 }
61612
61613 -static int
61614 +int
61615 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61616 {
61617 return send_signal(sig, info, t, 0);
61618 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61619 unsigned long int flags;
61620 int ret, blocked, ignored;
61621 struct k_sigaction *action;
61622 + int is_unhandled = 0;
61623
61624 spin_lock_irqsave(&t->sighand->siglock, flags);
61625 action = &t->sighand->action[sig-1];
61626 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61627 }
61628 if (action->sa.sa_handler == SIG_DFL)
61629 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61630 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61631 + is_unhandled = 1;
61632 ret = specific_send_sig_info(sig, info, t);
61633 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61634
61635 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
61636 + normal operation */
61637 + if (is_unhandled) {
61638 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61639 + gr_handle_crash(t, sig);
61640 + }
61641 +
61642 return ret;
61643 }
61644
61645 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61646 {
61647 int ret = check_kill_permission(sig, info, p);
61648
61649 - if (!ret && sig)
61650 + if (!ret && sig) {
61651 ret = do_send_sig_info(sig, info, p, true);
61652 + if (!ret)
61653 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61654 + }
61655
61656 return ret;
61657 }
61658 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61659 {
61660 siginfo_t info;
61661
61662 + pax_track_stack();
61663 +
61664 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61665
61666 memset(&info, 0, sizeof info);
61667 diff -urNp linux-2.6.32.41/kernel/smp.c linux-2.6.32.41/kernel/smp.c
61668 --- linux-2.6.32.41/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61669 +++ linux-2.6.32.41/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61670 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61671 }
61672 EXPORT_SYMBOL(smp_call_function);
61673
61674 -void ipi_call_lock(void)
61675 +void ipi_call_lock(void) __acquires(call_function.lock)
61676 {
61677 spin_lock(&call_function.lock);
61678 }
61679
61680 -void ipi_call_unlock(void)
61681 +void ipi_call_unlock(void) __releases(call_function.lock)
61682 {
61683 spin_unlock(&call_function.lock);
61684 }
61685
61686 -void ipi_call_lock_irq(void)
61687 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
61688 {
61689 spin_lock_irq(&call_function.lock);
61690 }
61691
61692 -void ipi_call_unlock_irq(void)
61693 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
61694 {
61695 spin_unlock_irq(&call_function.lock);
61696 }
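
Note: the smp.c hunk only adds sparse lock annotations; outside a sparse run they compile away, and under sparse (__CHECKER__) they declare that the function acquires or releases the named lock. A stripped-down model using roughly the definitions the kernel's compiler.h is assumed to use:

    #ifdef __CHECKER__
    # define __acquires(x) __attribute__((context(x, 0, 1)))
    # define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t call_function_lock = PTHREAD_MUTEX_INITIALIZER;

    static void ipi_call_lock(void) __acquires(call_function_lock)
    {
        pthread_mutex_lock(&call_function_lock);
    }

    static void ipi_call_unlock(void) __releases(call_function_lock)
    {
        pthread_mutex_unlock(&call_function_lock);
    }

    int main(void)
    {
        ipi_call_lock();
        puts("lock held");
        ipi_call_unlock();
        return 0;
    }
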
61697 diff -urNp linux-2.6.32.41/kernel/softirq.c linux-2.6.32.41/kernel/softirq.c
61698 --- linux-2.6.32.41/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61699 +++ linux-2.6.32.41/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61700 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61701
61702 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61703
61704 -char *softirq_to_name[NR_SOFTIRQS] = {
61705 +const char * const softirq_to_name[NR_SOFTIRQS] = {
61706 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61707 "TASKLET", "SCHED", "HRTIMER", "RCU"
61708 };
61709 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61710
61711 asmlinkage void __do_softirq(void)
61712 {
61713 - struct softirq_action *h;
61714 + const struct softirq_action *h;
61715 __u32 pending;
61716 int max_restart = MAX_SOFTIRQ_RESTART;
61717 int cpu;
61718 @@ -233,7 +233,7 @@ restart:
61719 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61720
61721 trace_softirq_entry(h, softirq_vec);
61722 - h->action(h);
61723 + h->action();
61724 trace_softirq_exit(h, softirq_vec);
61725 if (unlikely(prev_count != preempt_count())) {
61726 printk(KERN_ERR "huh, entered softirq %td %s %p"
61727 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61728 local_irq_restore(flags);
61729 }
61730
61731 -void open_softirq(int nr, void (*action)(struct softirq_action *))
61732 +void open_softirq(int nr, void (*action)(void))
61733 {
61734 softirq_vec[nr].action = action;
61735 }
61736 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61737
61738 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61739
61740 -static void tasklet_action(struct softirq_action *a)
61741 +static void tasklet_action(void)
61742 {
61743 struct tasklet_struct *list;
61744
61745 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61746 }
61747 }
61748
61749 -static void tasklet_hi_action(struct softirq_action *a)
61750 +static void tasklet_hi_action(void)
61751 {
61752 struct tasklet_struct *list;
61753
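
Note: the softirq.c hunk drops the unused struct softirq_action * argument, so handlers become plain void(void) functions in a dispatch table that can then be treated as constant data. Userspace sketch of that dispatch pattern:

    #include <stdio.h>

    #define NR_SOFTIRQS 2

    struct softirq_action { void (*action)(void); };

    static struct softirq_action softirq_vec[NR_SOFTIRQS];

    static void open_softirq(int nr, void (*action)(void))
    {
        softirq_vec[nr].action = action;
    }

    static void timer_action(void)   { puts("TIMER"); }
    static void tasklet_action(void) { puts("TASKLET"); }

    int main(void)
    {
        open_softirq(0, timer_action);
        open_softirq(1, tasklet_action);
        for (int i = 0; i < NR_SOFTIRQS; i++)
            softirq_vec[i].action();        /* was h->action(h) before the change */
        return 0;
    }
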
61754 diff -urNp linux-2.6.32.41/kernel/sys.c linux-2.6.32.41/kernel/sys.c
61755 --- linux-2.6.32.41/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61756 +++ linux-2.6.32.41/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61757 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61758 error = -EACCES;
61759 goto out;
61760 }
61761 +
61762 + if (gr_handle_chroot_setpriority(p, niceval)) {
61763 + error = -EACCES;
61764 + goto out;
61765 + }
61766 +
61767 no_nice = security_task_setnice(p, niceval);
61768 if (no_nice) {
61769 error = no_nice;
61770 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61771 !(user = find_user(who)))
61772 goto out_unlock; /* No processes for this user */
61773
61774 - do_each_thread(g, p)
61775 + do_each_thread(g, p) {
61776 if (__task_cred(p)->uid == who)
61777 error = set_one_prio(p, niceval, error);
61778 - while_each_thread(g, p);
61779 + } while_each_thread(g, p);
61780 if (who != cred->uid)
61781 free_uid(user); /* For find_user() */
61782 break;
61783 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61784 !(user = find_user(who)))
61785 goto out_unlock; /* No processes for this user */
61786
61787 - do_each_thread(g, p)
61788 + do_each_thread(g, p) {
61789 if (__task_cred(p)->uid == who) {
61790 niceval = 20 - task_nice(p);
61791 if (niceval > retval)
61792 retval = niceval;
61793 }
61794 - while_each_thread(g, p);
61795 + } while_each_thread(g, p);
61796 if (who != cred->uid)
61797 free_uid(user); /* for find_user() */
61798 break;
61799 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
61800 goto error;
61801 }
61802
61803 + if (gr_check_group_change(new->gid, new->egid, -1))
61804 + goto error;
61805 +
61806 if (rgid != (gid_t) -1 ||
61807 (egid != (gid_t) -1 && egid != old->gid))
61808 new->sgid = new->egid;
61809 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
61810 goto error;
61811
61812 retval = -EPERM;
61813 +
61814 + if (gr_check_group_change(gid, gid, gid))
61815 + goto error;
61816 +
61817 if (capable(CAP_SETGID))
61818 new->gid = new->egid = new->sgid = new->fsgid = gid;
61819 else if (gid == old->gid || gid == old->sgid)
61820 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
61821 goto error;
61822 }
61823
61824 + if (gr_check_user_change(new->uid, new->euid, -1))
61825 + goto error;
61826 +
61827 if (new->uid != old->uid) {
61828 retval = set_user(new);
61829 if (retval < 0)
61830 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
61831 goto error;
61832
61833 retval = -EPERM;
61834 +
61835 + if (gr_check_crash_uid(uid))
61836 + goto error;
61837 + if (gr_check_user_change(uid, uid, uid))
61838 + goto error;
61839 +
61840 if (capable(CAP_SETUID)) {
61841 new->suid = new->uid = uid;
61842 if (uid != old->uid) {
61843 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
61844 goto error;
61845 }
61846
61847 + if (gr_check_user_change(ruid, euid, -1))
61848 + goto error;
61849 +
61850 if (ruid != (uid_t) -1) {
61851 new->uid = ruid;
61852 if (ruid != old->uid) {
61853 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
61854 goto error;
61855 }
61856
61857 + if (gr_check_group_change(rgid, egid, -1))
61858 + goto error;
61859 +
61860 if (rgid != (gid_t) -1)
61861 new->gid = rgid;
61862 if (egid != (gid_t) -1)
61863 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
61864 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
61865 goto error;
61866
61867 + if (gr_check_user_change(-1, -1, uid))
61868 + goto error;
61869 +
61870 if (uid == old->uid || uid == old->euid ||
61871 uid == old->suid || uid == old->fsuid ||
61872 capable(CAP_SETUID)) {
61873 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
61874 if (gid == old->gid || gid == old->egid ||
61875 gid == old->sgid || gid == old->fsgid ||
61876 capable(CAP_SETGID)) {
61877 + if (gr_check_group_change(-1, -1, gid))
61878 + goto error;
61879 +
61880 if (gid != old_fsgid) {
61881 new->fsgid = gid;
61882 goto change_okay;
61883 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
61884 error = get_dumpable(me->mm);
61885 break;
61886 case PR_SET_DUMPABLE:
61887 - if (arg2 < 0 || arg2 > 1) {
61888 + if (arg2 > 1) {
61889 error = -EINVAL;
61890 break;
61891 }
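
Note: besides the gr_check_*_change() hooks, the sys.c hunk drops "arg2 < 0" from the PR_SET_DUMPABLE check because prctl's arg2 is an unsigned long, so that half of the test is always false (gcc's -Wtype-limits flags exactly this). In miniature:

    #include <stdio.h>

    static int set_dumpable_check(unsigned long arg2)
    {
        if (arg2 > 1)          /* the "< 0" half was dead code for an unsigned type */
            return -1;         /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               set_dumpable_check(0), set_dumpable_check(1), set_dumpable_check(2));
        return 0;
    }
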
61892 diff -urNp linux-2.6.32.41/kernel/sysctl.c linux-2.6.32.41/kernel/sysctl.c
61893 --- linux-2.6.32.41/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
61894 +++ linux-2.6.32.41/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
61895 @@ -63,6 +63,13 @@
61896 static int deprecated_sysctl_warning(struct __sysctl_args *args);
61897
61898 #if defined(CONFIG_SYSCTL)
61899 +#include <linux/grsecurity.h>
61900 +#include <linux/grinternal.h>
61901 +
61902 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
61903 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
61904 + const int op);
61905 +extern int gr_handle_chroot_sysctl(const int op);
61906
61907 /* External variables not in a header file. */
61908 extern int C_A_D;
61909 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
61910 static int proc_taint(struct ctl_table *table, int write,
61911 void __user *buffer, size_t *lenp, loff_t *ppos);
61912 #endif
61913 +extern ctl_table grsecurity_table[];
61914
61915 static struct ctl_table root_table[];
61916 static struct ctl_table_root sysctl_table_root;
61917 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
61918 int sysctl_legacy_va_layout;
61919 #endif
61920
61921 +#ifdef CONFIG_PAX_SOFTMODE
61922 +static ctl_table pax_table[] = {
61923 + {
61924 + .ctl_name = CTL_UNNUMBERED,
61925 + .procname = "softmode",
61926 + .data = &pax_softmode,
61927 + .maxlen = sizeof(unsigned int),
61928 + .mode = 0600,
61929 + .proc_handler = &proc_dointvec,
61930 + },
61931 +
61932 + { .ctl_name = 0 }
61933 +};
61934 +#endif
61935 +
61936 extern int prove_locking;
61937 extern int lock_stat;
61938
61939 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
61940 #endif
61941
61942 static struct ctl_table kern_table[] = {
61943 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61944 + {
61945 + .ctl_name = CTL_UNNUMBERED,
61946 + .procname = "grsecurity",
61947 + .mode = 0500,
61948 + .child = grsecurity_table,
61949 + },
61950 +#endif
61951 +
61952 +#ifdef CONFIG_PAX_SOFTMODE
61953 + {
61954 + .ctl_name = CTL_UNNUMBERED,
61955 + .procname = "pax",
61956 + .mode = 0500,
61957 + .child = pax_table,
61958 + },
61959 +#endif
61960 +
61961 {
61962 .ctl_name = CTL_UNNUMBERED,
61963 .procname = "sched_child_runs_first",
61964 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
61965 .data = &modprobe_path,
61966 .maxlen = KMOD_PATH_LEN,
61967 .mode = 0644,
61968 - .proc_handler = &proc_dostring,
61969 - .strategy = &sysctl_string,
61970 + .proc_handler = &proc_dostring_modpriv,
61971 + .strategy = &sysctl_string_modpriv,
61972 },
61973 {
61974 .ctl_name = CTL_UNNUMBERED,
61975 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
61976 .mode = 0644,
61977 .proc_handler = &proc_dointvec
61978 },
61979 + {
61980 + .procname = "heap_stack_gap",
61981 + .data = &sysctl_heap_stack_gap,
61982 + .maxlen = sizeof(sysctl_heap_stack_gap),
61983 + .mode = 0644,
61984 + .proc_handler = proc_doulongvec_minmax,
61985 + },
61986 #else
61987 {
61988 .ctl_name = CTL_UNNUMBERED,
61989 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
61990 return 0;
61991 }
61992
61993 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
61994 +
61995 static int parse_table(int __user *name, int nlen,
61996 void __user *oldval, size_t __user *oldlenp,
61997 void __user *newval, size_t newlen,
61998 @@ -1821,7 +1871,7 @@ repeat:
61999 if (n == table->ctl_name) {
62000 int error;
62001 if (table->child) {
62002 - if (sysctl_perm(root, table, MAY_EXEC))
62003 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62004 return -EPERM;
62005 name++;
62006 nlen--;
62007 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62008 int error;
62009 int mode;
62010
62011 + if (table->parent != NULL && table->parent->procname != NULL &&
62012 + table->procname != NULL &&
62013 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62014 + return -EACCES;
62015 + if (gr_handle_chroot_sysctl(op))
62016 + return -EACCES;
62017 + error = gr_handle_sysctl(table, op);
62018 + if (error)
62019 + return error;
62020 +
62021 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62022 + if (error)
62023 + return error;
62024 +
62025 + if (root->permissions)
62026 + mode = root->permissions(root, current->nsproxy, table);
62027 + else
62028 + mode = table->mode;
62029 +
62030 + return test_perm(mode, op);
62031 +}
62032 +
62033 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62034 +{
62035 + int error;
62036 + int mode;
62037 +
62038 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62039 if (error)
62040 return error;
62041 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62042 buffer, lenp, ppos);
62043 }
62044
62045 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62046 + void __user *buffer, size_t *lenp, loff_t *ppos)
62047 +{
62048 + if (write && !capable(CAP_SYS_MODULE))
62049 + return -EPERM;
62050 +
62051 + return _proc_do_string(table->data, table->maxlen, write,
62052 + buffer, lenp, ppos);
62053 +}
62054 +
62055
62056 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62057 int *valp,
62058 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62059 vleft = table->maxlen / sizeof(unsigned long);
62060 left = *lenp;
62061
62062 - for (; left && vleft--; i++, min++, max++, first=0) {
62063 + for (; left && vleft--; i++, first=0) {
62064 if (write) {
62065 while (left) {
62066 char c;
62067 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62068 return -ENOSYS;
62069 }
62070
62071 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62072 + void __user *buffer, size_t *lenp, loff_t *ppos)
62073 +{
62074 + return -ENOSYS;
62075 +}
62076 +
62077 int proc_dointvec(struct ctl_table *table, int write,
62078 void __user *buffer, size_t *lenp, loff_t *ppos)
62079 {
62080 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62081 return 1;
62082 }
62083
62084 +int sysctl_string_modpriv(struct ctl_table *table,
62085 + void __user *oldval, size_t __user *oldlenp,
62086 + void __user *newval, size_t newlen)
62087 +{
62088 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62089 + return -EPERM;
62090 +
62091 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62092 +}
62093 +
62094 /*
62095 * This function makes sure that all of the integers in the vector
62096 * are between the minimum and maximum values given in the arrays
62097 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62098 return -ENOSYS;
62099 }
62100
62101 +int sysctl_string_modpriv(struct ctl_table *table,
62102 + void __user *oldval, size_t __user *oldlenp,
62103 + void __user *newval, size_t newlen)
62104 +{
62105 + return -ENOSYS;
62106 +}
62107 +
62108 int sysctl_intvec(struct ctl_table *table,
62109 void __user *oldval, size_t __user *oldlenp,
62110 void __user *newval, size_t newlen)
62111 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62112 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62113 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62114 EXPORT_SYMBOL(proc_dostring);
62115 +EXPORT_SYMBOL(proc_dostring_modpriv);
62116 EXPORT_SYMBOL(proc_doulongvec_minmax);
62117 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62118 EXPORT_SYMBOL(register_sysctl_table);
62119 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62120 EXPORT_SYMBOL(sysctl_jiffies);
62121 EXPORT_SYMBOL(sysctl_ms_jiffies);
62122 EXPORT_SYMBOL(sysctl_string);
62123 +EXPORT_SYMBOL(sysctl_string_modpriv);
62124 EXPORT_SYMBOL(sysctl_data);
62125 EXPORT_SYMBOL(unregister_sysctl_table);
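
Note: the sysctl.c hunk hangs a 0600 "softmode" leaf under a 0500 "pax" directory (and a "grsecurity" directory alongside it) in kern_table. A userspace model of that nested ctl_table layout, with the struct trimmed to the fields the example needs:

    #include <stdio.h>

    struct ctl_table {
        const char *procname;
        void *data;
        int maxlen;
        unsigned int mode;
        struct ctl_table *child;
    };

    static unsigned int pax_softmode;

    static struct ctl_table pax_table[] = {
        { .procname = "softmode", .data = &pax_softmode,
          .maxlen = sizeof(unsigned int), .mode = 0600 },
        { 0 }                                 /* sentinel, like { .ctl_name = 0 } */
    };

    static struct ctl_table kern_table[] = {
        { .procname = "pax", .mode = 0500, .child = pax_table },
        { 0 }
    };

    int main(void)
    {
        for (struct ctl_table *d = kern_table; d->procname; d++)
            for (struct ctl_table *t = d->child; t && t->procname; t++)
                printf("kernel.%s.%s mode %04o\n", d->procname, t->procname, t->mode);
        return 0;
    }
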
62126 diff -urNp linux-2.6.32.41/kernel/sysctl_check.c linux-2.6.32.41/kernel/sysctl_check.c
62127 --- linux-2.6.32.41/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62128 +++ linux-2.6.32.41/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62129 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62130 } else {
62131 if ((table->strategy == sysctl_data) ||
62132 (table->strategy == sysctl_string) ||
62133 + (table->strategy == sysctl_string_modpriv) ||
62134 (table->strategy == sysctl_intvec) ||
62135 (table->strategy == sysctl_jiffies) ||
62136 (table->strategy == sysctl_ms_jiffies) ||
62137 (table->proc_handler == proc_dostring) ||
62138 + (table->proc_handler == proc_dostring_modpriv) ||
62139 (table->proc_handler == proc_dointvec) ||
62140 (table->proc_handler == proc_dointvec_minmax) ||
62141 (table->proc_handler == proc_dointvec_jiffies) ||
62142 diff -urNp linux-2.6.32.41/kernel/taskstats.c linux-2.6.32.41/kernel/taskstats.c
62143 --- linux-2.6.32.41/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62144 +++ linux-2.6.32.41/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62145 @@ -26,9 +26,12 @@
62146 #include <linux/cgroup.h>
62147 #include <linux/fs.h>
62148 #include <linux/file.h>
62149 +#include <linux/grsecurity.h>
62150 #include <net/genetlink.h>
62151 #include <asm/atomic.h>
62152
62153 +extern int gr_is_taskstats_denied(int pid);
62154 +
62155 /*
62156 * Maximum length of a cpumask that can be specified in
62157 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62158 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62159 size_t size;
62160 cpumask_var_t mask;
62161
62162 + if (gr_is_taskstats_denied(current->pid))
62163 + return -EACCES;
62164 +
62165 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62166 return -ENOMEM;
62167
62168 diff -urNp linux-2.6.32.41/kernel/time/tick-broadcast.c linux-2.6.32.41/kernel/time/tick-broadcast.c
62169 --- linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62170 +++ linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62171 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62172 * then clear the broadcast bit.
62173 */
62174 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62175 - int cpu = smp_processor_id();
62176 + cpu = smp_processor_id();
62177
62178 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62179 tick_broadcast_clear_oneshot(cpu);
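
Note: the tick-broadcast.c hunk removes a redeclaration of cpu inside the if-block; the inner "int cpu" shadowed a cpu variable already in scope in tick_device_uses_broadcast(). Shadowing like this is easy to miss and is what gcc's -Wshadow reports; the bug class in miniature:

    #include <stdio.h>

    int main(void)
    {
        int cpu = -1;                        /* the variable the function already had */
        {
            int cpu = 3;                     /* pre-fix shape: a second cpu shadows the first */
            (void)cpu;
        }
        printf("outer cpu is still %d\n", cpu);   /* prints -1; the assignment hit the shadow */
        return 0;
    }
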
62180 diff -urNp linux-2.6.32.41/kernel/time/timekeeping.c linux-2.6.32.41/kernel/time/timekeeping.c
62181 --- linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 16:56:59.000000000 -0400
62182 +++ linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 19:09:33.000000000 -0400
62183 @@ -14,6 +14,7 @@
62184 #include <linux/init.h>
62185 #include <linux/mm.h>
62186 #include <linux/sched.h>
62187 +#include <linux/grsecurity.h>
62188 #include <linux/sysdev.h>
62189 #include <linux/clocksource.h>
62190 #include <linux/jiffies.h>
62191 @@ -176,7 +177,7 @@ void update_xtime_cache(u64 nsec)
62192 */
62193 struct timespec ts = xtime;
62194 timespec_add_ns(&ts, nsec);
62195 - ACCESS_ONCE(xtime_cache) = ts;
62196 + ACCESS_ONCE_RW(xtime_cache) = ts;
62197 }
62198
62199 /* must hold xtime_lock */
62200 @@ -329,6 +330,8 @@ int do_settimeofday(struct timespec *tv)
62201 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62202 return -EINVAL;
62203
62204 + gr_log_timechange();
62205 +
62206 write_seqlock_irqsave(&xtime_lock, flags);
62207
62208 timekeeping_forward_now();
62209 diff -urNp linux-2.6.32.41/kernel/time/timer_list.c linux-2.6.32.41/kernel/time/timer_list.c
62210 --- linux-2.6.32.41/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62211 +++ linux-2.6.32.41/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62212 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62213
62214 static void print_name_offset(struct seq_file *m, void *sym)
62215 {
62216 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62217 + SEQ_printf(m, "<%p>", NULL);
62218 +#else
62219 char symname[KSYM_NAME_LEN];
62220
62221 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62222 SEQ_printf(m, "<%p>", sym);
62223 else
62224 SEQ_printf(m, "%s", symname);
62225 +#endif
62226 }
62227
62228 static void
62229 @@ -112,7 +116,11 @@ next_one:
62230 static void
62231 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62232 {
62233 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62234 + SEQ_printf(m, " .base: %p\n", NULL);
62235 +#else
62236 SEQ_printf(m, " .base: %p\n", base);
62237 +#endif
62238 SEQ_printf(m, " .index: %d\n",
62239 base->index);
62240 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62241 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62242 {
62243 struct proc_dir_entry *pe;
62244
62245 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62246 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62247 +#else
62248 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62249 +#endif
62250 if (!pe)
62251 return -ENOMEM;
62252 return 0;
62253 diff -urNp linux-2.6.32.41/kernel/time/timer_stats.c linux-2.6.32.41/kernel/time/timer_stats.c
62254 --- linux-2.6.32.41/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62255 +++ linux-2.6.32.41/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62256 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62257 static unsigned long nr_entries;
62258 static struct entry entries[MAX_ENTRIES];
62259
62260 -static atomic_t overflow_count;
62261 +static atomic_unchecked_t overflow_count;
62262
62263 /*
62264 * The entries are in a hash-table, for fast lookup:
62265 @@ -140,7 +140,7 @@ static void reset_entries(void)
62266 nr_entries = 0;
62267 memset(entries, 0, sizeof(entries));
62268 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62269 - atomic_set(&overflow_count, 0);
62270 + atomic_set_unchecked(&overflow_count, 0);
62271 }
62272
62273 static struct entry *alloc_entry(void)
62274 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62275 if (likely(entry))
62276 entry->count++;
62277 else
62278 - atomic_inc(&overflow_count);
62279 + atomic_inc_unchecked(&overflow_count);
62280
62281 out_unlock:
62282 spin_unlock_irqrestore(lock, flags);
62283 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62284
62285 static void print_name_offset(struct seq_file *m, unsigned long addr)
62286 {
62287 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62288 + seq_printf(m, "<%p>", NULL);
62289 +#else
62290 char symname[KSYM_NAME_LEN];
62291
62292 if (lookup_symbol_name(addr, symname) < 0)
62293 seq_printf(m, "<%p>", (void *)addr);
62294 else
62295 seq_printf(m, "%s", symname);
62296 +#endif
62297 }
62298
62299 static int tstats_show(struct seq_file *m, void *v)
62300 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62301
62302 seq_puts(m, "Timer Stats Version: v0.2\n");
62303 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62304 - if (atomic_read(&overflow_count))
62305 + if (atomic_read_unchecked(&overflow_count))
62306 seq_printf(m, "Overflow: %d entries\n",
62307 - atomic_read(&overflow_count));
62308 + atomic_read_unchecked(&overflow_count));
62309
62310 for (i = 0; i < nr_entries; i++) {
62311 entry = entries + i;
62312 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62313 {
62314 struct proc_dir_entry *pe;
62315
62316 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62317 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62318 +#else
62319 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62320 +#endif
62321 if (!pe)
62322 return -ENOMEM;
62323 return 0;
62324 diff -urNp linux-2.6.32.41/kernel/time.c linux-2.6.32.41/kernel/time.c
62325 --- linux-2.6.32.41/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62326 +++ linux-2.6.32.41/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62327 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62328 return error;
62329
62330 if (tz) {
62331 + /* we log in do_settimeofday called below, so don't log twice
62332 + */
62333 + if (!tv)
62334 + gr_log_timechange();
62335 +
62336 /* SMP safe, global irq locking makes it work. */
62337 sys_tz = *tz;
62338 update_vsyscall_tz();
62339 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62340 * Avoid unnecessary multiplications/divisions in the
62341 * two most common HZ cases:
62342 */
62343 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62344 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62345 {
62346 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62347 return (MSEC_PER_SEC / HZ) * j;
62348 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62349 }
62350 EXPORT_SYMBOL(jiffies_to_msecs);
62351
62352 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62353 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62354 {
62355 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62356 return (USEC_PER_SEC / HZ) * j;
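
Note: the kernel/time.c hunk only reorders declaration specifiers; "unsigned int inline" is accepted by gcc but the conventional (and warning-free) order puts inline first. Both forms below are equivalent:

    #include <stdio.h>

    /* static is added only so this userspace sketch links cleanly at -O0 */
    static unsigned int inline jiffies_to_msecs_old(const unsigned long j) { return (unsigned int)j; }
    static inline unsigned int jiffies_to_msecs_new(const unsigned long j) { return (unsigned int)j; }

    int main(void)
    {
        printf("%u %u\n", jiffies_to_msecs_old(42UL), jiffies_to_msecs_new(42UL));
        return 0;
    }
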
62357 diff -urNp linux-2.6.32.41/kernel/timer.c linux-2.6.32.41/kernel/timer.c
62358 --- linux-2.6.32.41/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62359 +++ linux-2.6.32.41/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62360 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62361 /*
62362 * This function runs timers and the timer-tq in bottom half context.
62363 */
62364 -static void run_timer_softirq(struct softirq_action *h)
62365 +static void run_timer_softirq(void)
62366 {
62367 struct tvec_base *base = __get_cpu_var(tvec_bases);
62368
62369 diff -urNp linux-2.6.32.41/kernel/trace/blktrace.c linux-2.6.32.41/kernel/trace/blktrace.c
62370 --- linux-2.6.32.41/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62371 +++ linux-2.6.32.41/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62372 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62373 struct blk_trace *bt = filp->private_data;
62374 char buf[16];
62375
62376 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62377 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62378
62379 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62380 }
62381 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62382 return 1;
62383
62384 bt = buf->chan->private_data;
62385 - atomic_inc(&bt->dropped);
62386 + atomic_inc_unchecked(&bt->dropped);
62387 return 0;
62388 }
62389
62390 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62391
62392 bt->dir = dir;
62393 bt->dev = dev;
62394 - atomic_set(&bt->dropped, 0);
62395 + atomic_set_unchecked(&bt->dropped, 0);
62396
62397 ret = -EIO;
62398 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62399 diff -urNp linux-2.6.32.41/kernel/trace/ftrace.c linux-2.6.32.41/kernel/trace/ftrace.c
62400 --- linux-2.6.32.41/kernel/trace/ftrace.c 2011-03-27 14:31:47.000000000 -0400
62401 +++ linux-2.6.32.41/kernel/trace/ftrace.c 2011-04-17 15:56:46.000000000 -0400
62402 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62403
62404 ip = rec->ip;
62405
62406 + ret = ftrace_arch_code_modify_prepare();
62407 + FTRACE_WARN_ON(ret);
62408 + if (ret)
62409 + return 0;
62410 +
62411 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62412 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62413 if (ret) {
62414 ftrace_bug(ret, ip);
62415 rec->flags |= FTRACE_FL_FAILED;
62416 - return 0;
62417 }
62418 - return 1;
62419 + return ret ? 0 : 1;
62420 }
62421
62422 /*
62423 diff -urNp linux-2.6.32.41/kernel/trace/ring_buffer.c linux-2.6.32.41/kernel/trace/ring_buffer.c
62424 --- linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62425 +++ linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62426 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62427 * the reader page). But if the next page is a header page,
62428 * its flags will be non zero.
62429 */
62430 -static int inline
62431 +static inline int
62432 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62433 struct buffer_page *page, struct list_head *list)
62434 {
62435 diff -urNp linux-2.6.32.41/kernel/trace/trace.c linux-2.6.32.41/kernel/trace/trace.c
62436 --- linux-2.6.32.41/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62437 +++ linux-2.6.32.41/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62438 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62439 size_t rem;
62440 unsigned int i;
62441
62442 + pax_track_stack();
62443 +
62444 /* copy the tracer to avoid using a global lock all around */
62445 mutex_lock(&trace_types_lock);
62446 if (unlikely(old_tracer != current_trace && current_trace)) {
62447 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62448 int entries, size, i;
62449 size_t ret;
62450
62451 + pax_track_stack();
62452 +
62453 if (*ppos & (PAGE_SIZE - 1)) {
62454 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62455 return -EINVAL;
62456 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62457 };
62458 #endif
62459
62460 -static struct dentry *d_tracer;
62461 -
62462 struct dentry *tracing_init_dentry(void)
62463 {
62464 + static struct dentry *d_tracer;
62465 static int once;
62466
62467 if (d_tracer)
62468 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62469 return d_tracer;
62470 }
62471
62472 -static struct dentry *d_percpu;
62473 -
62474 struct dentry *tracing_dentry_percpu(void)
62475 {
62476 + static struct dentry *d_percpu;
62477 static int once;
62478 struct dentry *d_tracer;
62479
62480 diff -urNp linux-2.6.32.41/kernel/trace/trace_events.c linux-2.6.32.41/kernel/trace/trace_events.c
62481 --- linux-2.6.32.41/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62482 +++ linux-2.6.32.41/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62483 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62484 * Modules must own their file_operations to keep up with
62485 * reference counting.
62486 */
62487 +
62488 +/* cannot be const */
62489 struct ftrace_module_file_ops {
62490 struct list_head list;
62491 struct module *mod;
62492 diff -urNp linux-2.6.32.41/kernel/trace/trace_mmiotrace.c linux-2.6.32.41/kernel/trace/trace_mmiotrace.c
62493 --- linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62494 +++ linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62495 @@ -23,7 +23,7 @@ struct header_iter {
62496 static struct trace_array *mmio_trace_array;
62497 static bool overrun_detected;
62498 static unsigned long prev_overruns;
62499 -static atomic_t dropped_count;
62500 +static atomic_unchecked_t dropped_count;
62501
62502 static void mmio_reset_data(struct trace_array *tr)
62503 {
62504 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62505
62506 static unsigned long count_overruns(struct trace_iterator *iter)
62507 {
62508 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62509 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62510 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62511
62512 if (over > prev_overruns)
62513 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62514 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62515 sizeof(*entry), 0, pc);
62516 if (!event) {
62517 - atomic_inc(&dropped_count);
62518 + atomic_inc_unchecked(&dropped_count);
62519 return;
62520 }
62521 entry = ring_buffer_event_data(event);
62522 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62523 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62524 sizeof(*entry), 0, pc);
62525 if (!event) {
62526 - atomic_inc(&dropped_count);
62527 + atomic_inc_unchecked(&dropped_count);
62528 return;
62529 }
62530 entry = ring_buffer_event_data(event);
62531 diff -urNp linux-2.6.32.41/kernel/trace/trace_output.c linux-2.6.32.41/kernel/trace/trace_output.c
62532 --- linux-2.6.32.41/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62533 +++ linux-2.6.32.41/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62534 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62535 return 0;
62536 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62537 if (!IS_ERR(p)) {
62538 - p = mangle_path(s->buffer + s->len, p, "\n");
62539 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62540 if (p) {
62541 s->len = p - s->buffer;
62542 return 1;
62543 diff -urNp linux-2.6.32.41/kernel/trace/trace_stack.c linux-2.6.32.41/kernel/trace/trace_stack.c
62544 --- linux-2.6.32.41/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62545 +++ linux-2.6.32.41/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62546 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62547 return;
62548
62549 /* we do not handle interrupt stacks yet */
62550 - if (!object_is_on_stack(&this_size))
62551 + if (!object_starts_on_stack(&this_size))
62552 return;
62553
62554 local_irq_save(flags);
62555 diff -urNp linux-2.6.32.41/kernel/trace/trace_workqueue.c linux-2.6.32.41/kernel/trace/trace_workqueue.c
62556 --- linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62557 +++ linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62558 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62559 int cpu;
62560 pid_t pid;
62561 /* Can be inserted from interrupt or user context, need to be atomic */
62562 - atomic_t inserted;
62563 + atomic_unchecked_t inserted;
62564 /*
62565 * Don't need to be atomic, works are serialized in a single workqueue thread
62566 * on a single CPU.
62567 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62568 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62569 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62570 if (node->pid == wq_thread->pid) {
62571 - atomic_inc(&node->inserted);
62572 + atomic_inc_unchecked(&node->inserted);
62573 goto found;
62574 }
62575 }
62576 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62577 tsk = get_pid_task(pid, PIDTYPE_PID);
62578 if (tsk) {
62579 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62580 - atomic_read(&cws->inserted), cws->executed,
62581 + atomic_read_unchecked(&cws->inserted), cws->executed,
62582 tsk->comm);
62583 put_task_struct(tsk);
62584 }
62585 diff -urNp linux-2.6.32.41/kernel/user.c linux-2.6.32.41/kernel/user.c
62586 --- linux-2.6.32.41/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62587 +++ linux-2.6.32.41/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62588 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62589 spin_lock_irq(&uidhash_lock);
62590 up = uid_hash_find(uid, hashent);
62591 if (up) {
62592 + put_user_ns(ns);
62593 key_put(new->uid_keyring);
62594 key_put(new->session_keyring);
62595 kmem_cache_free(uid_cachep, new);
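
Note: the user.c hunk releases the namespace reference on the path where an existing uid entry is found and the freshly built one is discarded, closing a reference leak. The place the matching reference is taken sits outside this hunk, so the sketch below only shows the general shape of the fix, with a plain counter standing in for the real refcount:

    #include <stdio.h>

    struct user_namespace { int refcount; };

    static void get_user_ns(struct user_namespace *ns) { ns->refcount++; }
    static void put_user_ns(struct user_namespace *ns) { ns->refcount--; }

    static int uid_already_hashed = 1;       /* pretend uid_hash_find() succeeds */

    static void alloc_uid(struct user_namespace *ns)
    {
        get_user_ns(ns);                     /* reference taken for the entry being built */
        if (uid_already_hashed) {
            put_user_ns(ns);                 /* the added line: drop it when "new" is discarded */
            return;
        }
        /* otherwise the new entry keeps the reference until it is freed */
    }

    int main(void)
    {
        struct user_namespace ns = { .refcount = 1 };
        alloc_uid(&ns);
        printf("refcount = %d (it would sit at 2, leaked, without the put)\n", ns.refcount);
        return 0;
    }
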
62596 diff -urNp linux-2.6.32.41/lib/bug.c linux-2.6.32.41/lib/bug.c
62597 --- linux-2.6.32.41/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62598 +++ linux-2.6.32.41/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62599 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62600 return BUG_TRAP_TYPE_NONE;
62601
62602 bug = find_bug(bugaddr);
62603 + if (!bug)
62604 + return BUG_TRAP_TYPE_NONE;
62605
62606 printk(KERN_EMERG "------------[ cut here ]------------\n");
62607
62608 diff -urNp linux-2.6.32.41/lib/debugobjects.c linux-2.6.32.41/lib/debugobjects.c
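
Note: the lib/bug.c hunk bails out when find_bug() returns NULL instead of carrying on with the report. The defensive pattern, in miniature:

    #include <stdio.h>

    struct bug_entry { const char *file; unsigned int line; };

    static const struct bug_entry *find_bug(unsigned long addr)
    {
        (void)addr;
        return NULL;                       /* not found */
    }

    static int report_bug(unsigned long addr)
    {
        const struct bug_entry *bug = find_bug(addr);

        if (!bug)                          /* the added check */
            return 0;                      /* BUG_TRAP_TYPE_NONE in the kernel */

        printf("kernel BUG at %s:%u!\n", bug->file, bug->line);
        return 1;
    }

    int main(void)
    {
        printf("report_bug -> %d\n", report_bug(0xdeadbeefUL));
        return 0;
    }
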
62609 --- linux-2.6.32.41/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62610 +++ linux-2.6.32.41/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62611 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62612 if (limit > 4)
62613 return;
62614
62615 - is_on_stack = object_is_on_stack(addr);
62616 + is_on_stack = object_starts_on_stack(addr);
62617 if (is_on_stack == onstack)
62618 return;
62619
62620 diff -urNp linux-2.6.32.41/lib/dma-debug.c linux-2.6.32.41/lib/dma-debug.c
62621 --- linux-2.6.32.41/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62622 +++ linux-2.6.32.41/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62623 @@ -861,7 +861,7 @@ out:
62624
62625 static void check_for_stack(struct device *dev, void *addr)
62626 {
62627 - if (object_is_on_stack(addr))
62628 + if (object_starts_on_stack(addr))
62629 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62630 "stack [addr=%p]\n", addr);
62631 }
62632 diff -urNp linux-2.6.32.41/lib/idr.c linux-2.6.32.41/lib/idr.c
62633 --- linux-2.6.32.41/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62634 +++ linux-2.6.32.41/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62635 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62636 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62637
62638 /* if already at the top layer, we need to grow */
62639 - if (id >= 1 << (idp->layers * IDR_BITS)) {
62640 + if (id >= (1 << (idp->layers * IDR_BITS))) {
62641 *starting_id = id;
62642 return IDR_NEED_TO_GROW;
62643 }
62644 diff -urNp linux-2.6.32.41/lib/inflate.c linux-2.6.32.41/lib/inflate.c
62645 --- linux-2.6.32.41/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62646 +++ linux-2.6.32.41/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62647 @@ -266,7 +266,7 @@ static void free(void *where)
62648 malloc_ptr = free_mem_ptr;
62649 }
62650 #else
62651 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62652 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62653 #define free(a) kfree(a)
62654 #endif
62655
62656 diff -urNp linux-2.6.32.41/lib/Kconfig.debug linux-2.6.32.41/lib/Kconfig.debug
62657 --- linux-2.6.32.41/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62658 +++ linux-2.6.32.41/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62659 @@ -905,7 +905,7 @@ config LATENCYTOP
62660 select STACKTRACE
62661 select SCHEDSTATS
62662 select SCHED_DEBUG
62663 - depends on HAVE_LATENCYTOP_SUPPORT
62664 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62665 help
62666 Enable this option if you want to use the LatencyTOP tool
62667 to find out which userspace is blocking on what kernel operations.
62668 diff -urNp linux-2.6.32.41/lib/kobject.c linux-2.6.32.41/lib/kobject.c
62669 --- linux-2.6.32.41/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62670 +++ linux-2.6.32.41/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62671 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62672 return ret;
62673 }
62674
62675 -struct sysfs_ops kobj_sysfs_ops = {
62676 +const struct sysfs_ops kobj_sysfs_ops = {
62677 .show = kobj_attr_show,
62678 .store = kobj_attr_store,
62679 };
62680 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62681 * If the kset was not able to be created, NULL will be returned.
62682 */
62683 static struct kset *kset_create(const char *name,
62684 - struct kset_uevent_ops *uevent_ops,
62685 + const struct kset_uevent_ops *uevent_ops,
62686 struct kobject *parent_kobj)
62687 {
62688 struct kset *kset;
62689 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62690 * If the kset was not able to be created, NULL will be returned.
62691 */
62692 struct kset *kset_create_and_add(const char *name,
62693 - struct kset_uevent_ops *uevent_ops,
62694 + const struct kset_uevent_ops *uevent_ops,
62695 struct kobject *parent_kobj)
62696 {
62697 struct kset *kset;
62698 diff -urNp linux-2.6.32.41/lib/kobject_uevent.c linux-2.6.32.41/lib/kobject_uevent.c
62699 --- linux-2.6.32.41/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62700 +++ linux-2.6.32.41/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62701 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62702 const char *subsystem;
62703 struct kobject *top_kobj;
62704 struct kset *kset;
62705 - struct kset_uevent_ops *uevent_ops;
62706 + const struct kset_uevent_ops *uevent_ops;
62707 u64 seq;
62708 int i = 0;
62709 int retval = 0;
62710 diff -urNp linux-2.6.32.41/lib/kref.c linux-2.6.32.41/lib/kref.c
62711 --- linux-2.6.32.41/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62712 +++ linux-2.6.32.41/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62713 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62714 */
62715 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62716 {
62717 - WARN_ON(release == NULL);
62718 + BUG_ON(release == NULL);
62719 WARN_ON(release == (void (*)(struct kref *))kfree);
62720
62721 if (atomic_dec_and_test(&kref->refcount)) {
62722 diff -urNp linux-2.6.32.41/lib/parser.c linux-2.6.32.41/lib/parser.c
62723 --- linux-2.6.32.41/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62724 +++ linux-2.6.32.41/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62725 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62726 char *buf;
62727 int ret;
62728
62729 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62730 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62731 if (!buf)
62732 return -ENOMEM;
62733 memcpy(buf, s->from, s->to - s->from);
62734 diff -urNp linux-2.6.32.41/lib/radix-tree.c linux-2.6.32.41/lib/radix-tree.c
62735 --- linux-2.6.32.41/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62736 +++ linux-2.6.32.41/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62737 @@ -81,7 +81,7 @@ struct radix_tree_preload {
62738 int nr;
62739 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62740 };
62741 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62742 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62743
62744 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62745 {
62746 diff -urNp linux-2.6.32.41/lib/random32.c linux-2.6.32.41/lib/random32.c
62747 --- linux-2.6.32.41/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62748 +++ linux-2.6.32.41/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62749 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62750 */
62751 static inline u32 __seed(u32 x, u32 m)
62752 {
62753 - return (x < m) ? x + m : x;
62754 + return (x <= m) ? x + m + 1 : x;
62755 }
62756
62757 /**
62758 diff -urNp linux-2.6.32.41/lib/vsprintf.c linux-2.6.32.41/lib/vsprintf.c
62759 --- linux-2.6.32.41/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62760 +++ linux-2.6.32.41/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62761 @@ -16,6 +16,9 @@
62762 * - scnprintf and vscnprintf
62763 */
62764
62765 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62766 +#define __INCLUDED_BY_HIDESYM 1
62767 +#endif
62768 #include <stdarg.h>
62769 #include <linux/module.h>
62770 #include <linux/types.h>
62771 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62772 return buf;
62773 }
62774
62775 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62776 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62777 {
62778 int len, i;
62779
62780 if ((unsigned long)s < PAGE_SIZE)
62781 - s = "<NULL>";
62782 + s = "(null)";
62783
62784 len = strnlen(s, spec.precision);
62785
62786 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
62787 unsigned long value = (unsigned long) ptr;
62788 #ifdef CONFIG_KALLSYMS
62789 char sym[KSYM_SYMBOL_LEN];
62790 - if (ext != 'f' && ext != 's')
62791 + if (ext != 'f' && ext != 's' && ext != 'a')
62792 sprint_symbol(sym, value);
62793 else
62794 kallsyms_lookup(value, NULL, NULL, NULL, sym);
62795 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
62796 * - 'f' For simple symbolic function names without offset
62797 * - 'S' For symbolic direct pointers with offset
62798 * - 's' For symbolic direct pointers without offset
62799 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
62800 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
62801 * - 'R' For a struct resource pointer, it prints the range of
62802 * addresses (not the name nor the flags)
62803 * - 'M' For a 6-byte MAC address, it prints the address in the
62804 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
62805 struct printf_spec spec)
62806 {
62807 if (!ptr)
62808 - return string(buf, end, "(null)", spec);
62809 + return string(buf, end, "(nil)", spec);
62810
62811 switch (*fmt) {
62812 case 'F':
62813 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
62814 case 's':
62815 /* Fallthrough */
62816 case 'S':
62817 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62818 + break;
62819 +#else
62820 + return symbol_string(buf, end, ptr, spec, *fmt);
62821 +#endif
62822 + case 'a':
62823 + /* Fallthrough */
62824 + case 'A':
62825 return symbol_string(buf, end, ptr, spec, *fmt);
62826 case 'R':
62827 return resource_string(buf, end, ptr, spec);
62828 @@ -1445,7 +1458,7 @@ do { \
62829 size_t len;
62830 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
62831 || (unsigned long)save_str < PAGE_SIZE)
62832 - save_str = "<NULL>";
62833 + save_str = "(null)";
62834 len = strlen(save_str);
62835 if (str + len + 1 < end)
62836 memcpy(str, save_str, len + 1);
62837 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
62838 typeof(type) value; \
62839 if (sizeof(type) == 8) { \
62840 args = PTR_ALIGN(args, sizeof(u32)); \
62841 - *(u32 *)&value = *(u32 *)args; \
62842 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
62843 + *(u32 *)&value = *(const u32 *)args; \
62844 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
62845 } else { \
62846 args = PTR_ALIGN(args, sizeof(type)); \
62847 - value = *(typeof(type) *)args; \
62848 + value = *(const typeof(type) *)args; \
62849 } \
62850 args += sizeof(type); \
62851 value; \
62852 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
62853 const char *str_arg = args;
62854 size_t len = strlen(str_arg);
62855 args += len + 1;
62856 - str = string(str, end, (char *)str_arg, spec);
62857 + str = string(str, end, str_arg, spec);
62858 break;
62859 }
62860
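
Note: the vsprintf.c hunk adds 'a'/'A' as pointer-format extensions that still go through symbol_string() when GRKERNSEC_HIDESYM suppresses the stock 's'/'S' handling. A rough userspace sketch of that dispatch decision (this is not the kernel's pointer() function, and the hide flag stands in for the Kconfig option):

    #include <stdio.h>

    #define HIDESYM 1                        /* stands in for CONFIG_GRKERNSEC_HIDESYM */

    static void print_pointer(char fmt, const void *ptr)
    {
        int resolve;

        switch (fmt) {
        case 's': case 'S':
            resolve = !HIDESYM;              /* stock formats: suppressed when hiding symbols */
            break;
        case 'a': case 'A':
            resolve = 1;                     /* new formats: explicitly approved for output */
            break;
        default:
            resolve = 0;
            break;
        }

        if (resolve)
            printf("some_function+0x0/0x10  (illustrative symbol text)\n");
        else
            printf("%p\n", ptr);
    }

    int main(void)
    {
        int x;
        print_pointer('S', &x);
        print_pointer('A', &x);
        return 0;
    }
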
62861 diff -urNp linux-2.6.32.41/localversion-grsec linux-2.6.32.41/localversion-grsec
62862 --- linux-2.6.32.41/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
62863 +++ linux-2.6.32.41/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
62864 @@ -0,0 +1 @@
62865 +-grsec
62866 diff -urNp linux-2.6.32.41/Makefile linux-2.6.32.41/Makefile
62867 --- linux-2.6.32.41/Makefile 2011-05-23 16:56:59.000000000 -0400
62868 +++ linux-2.6.32.41/Makefile 2011-06-04 20:35:20.000000000 -0400
62869 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
62870
62871 HOSTCC = gcc
62872 HOSTCXX = g++
62873 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
62874 -HOSTCXXFLAGS = -O2
62875 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
62876 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
62877 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
62878
62879 # Decide whether to build built-in, modular, or both.
62880 # Normally, just do built-in.
62881 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
62882 KBUILD_CPPFLAGS := -D__KERNEL__
62883
62884 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
62885 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
62886 -fno-strict-aliasing -fno-common \
62887 -Werror-implicit-function-declaration \
62888 -Wno-format-security \
62889 -fno-delete-null-pointer-checks
62890 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
62891 KBUILD_AFLAGS := -D__ASSEMBLY__
62892
62893 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
62894 @@ -644,7 +647,7 @@ export mod_strip_cmd
62895
62896
62897 ifeq ($(KBUILD_EXTMOD),)
62898 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
62899 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
62900
62901 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
62902 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
62903 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
62904 # version.h and scripts_basic is processed / created.
62905
62906 # Listed in dependency order
62907 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
62908 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
62909 +
62910 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
62911 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
62912 +endif
62913 +pax-plugin:
62914 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
62915 + $(Q)$(MAKE) $(build)=tools/gcc
62916 +else
62917 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
62918 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
62919 +endif
62920 +endif
62921
62922 # prepare3 is used to check if we are building in a separate output directory,
62923 # and if so do:
62924 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
62925 endif
62926
62927 # prepare2 creates a makefile if using a separate output directory
62928 -prepare2: prepare3 outputmakefile
62929 +prepare2: prepare3 outputmakefile pax-plugin
62930
62931 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
62932 include/asm include/config/auto.conf
62933 diff -urNp linux-2.6.32.41/mm/backing-dev.c linux-2.6.32.41/mm/backing-dev.c
62934 --- linux-2.6.32.41/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
62935 +++ linux-2.6.32.41/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
62936 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
62937 * Add the default flusher task that gets created for any bdi
62938 * that has dirty data pending writeout
62939 */
62940 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
62941 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
62942 {
62943 if (!bdi_cap_writeback_dirty(bdi))
62944 return;
62945 diff -urNp linux-2.6.32.41/mm/filemap.c linux-2.6.32.41/mm/filemap.c
62946 --- linux-2.6.32.41/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
62947 +++ linux-2.6.32.41/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
62948 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
62949 struct address_space *mapping = file->f_mapping;
62950
62951 if (!mapping->a_ops->readpage)
62952 - return -ENOEXEC;
62953 + return -ENODEV;
62954 file_accessed(file);
62955 vma->vm_ops = &generic_file_vm_ops;
62956 vma->vm_flags |= VM_CAN_NONLINEAR;
62957 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
62958 *pos = i_size_read(inode);
62959
62960 if (limit != RLIM_INFINITY) {
62961 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
62962 if (*pos >= limit) {
62963 send_sig(SIGXFSZ, current, 0);
62964 return -EFBIG;
62965 diff -urNp linux-2.6.32.41/mm/fremap.c linux-2.6.32.41/mm/fremap.c
62966 --- linux-2.6.32.41/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
62967 +++ linux-2.6.32.41/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
62968 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
62969 retry:
62970 vma = find_vma(mm, start);
62971
62972 +#ifdef CONFIG_PAX_SEGMEXEC
62973 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
62974 + goto out;
62975 +#endif
62976 +
62977 /*
62978 * Make sure the vma is shared, that it supports prefaulting,
62979 * and that the remapped range is valid and fully within
62980 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
62981 /*
62982 * drop PG_Mlocked flag for over-mapped range
62983 */
62984 - unsigned int saved_flags = vma->vm_flags;
62985 + unsigned long saved_flags = vma->vm_flags;
62986 munlock_vma_pages_range(vma, start, start + size);
62987 vma->vm_flags = saved_flags;
62988 }
62989 diff -urNp linux-2.6.32.41/mm/highmem.c linux-2.6.32.41/mm/highmem.c
62990 --- linux-2.6.32.41/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
62991 +++ linux-2.6.32.41/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
62992 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
62993 * So no dangers, even with speculative execution.
62994 */
62995 page = pte_page(pkmap_page_table[i]);
62996 + pax_open_kernel();
62997 pte_clear(&init_mm, (unsigned long)page_address(page),
62998 &pkmap_page_table[i]);
62999 -
63000 + pax_close_kernel();
63001 set_page_address(page, NULL);
63002 need_flush = 1;
63003 }
63004 @@ -177,9 +178,11 @@ start:
63005 }
63006 }
63007 vaddr = PKMAP_ADDR(last_pkmap_nr);
63008 +
63009 + pax_open_kernel();
63010 set_pte_at(&init_mm, vaddr,
63011 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63012 -
63013 + pax_close_kernel();
63014 pkmap_count[last_pkmap_nr] = 1;
63015 set_page_address(page, (void *)vaddr);
63016
63017 diff -urNp linux-2.6.32.41/mm/hugetlb.c linux-2.6.32.41/mm/hugetlb.c
63018 --- linux-2.6.32.41/mm/hugetlb.c 2011-03-27 14:31:47.000000000 -0400
63019 +++ linux-2.6.32.41/mm/hugetlb.c 2011-04-17 15:56:46.000000000 -0400
63020 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63021 return 1;
63022 }
63023
63024 +#ifdef CONFIG_PAX_SEGMEXEC
63025 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63026 +{
63027 + struct mm_struct *mm = vma->vm_mm;
63028 + struct vm_area_struct *vma_m;
63029 + unsigned long address_m;
63030 + pte_t *ptep_m;
63031 +
63032 + vma_m = pax_find_mirror_vma(vma);
63033 + if (!vma_m)
63034 + return;
63035 +
63036 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63037 + address_m = address + SEGMEXEC_TASK_SIZE;
63038 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63039 + get_page(page_m);
63040 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63041 +}
63042 +#endif
63043 +
63044 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63045 unsigned long address, pte_t *ptep, pte_t pte,
63046 struct page *pagecache_page)
63047 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63048 huge_ptep_clear_flush(vma, address, ptep);
63049 set_huge_pte_at(mm, address, ptep,
63050 make_huge_pte(vma, new_page, 1));
63051 +
63052 +#ifdef CONFIG_PAX_SEGMEXEC
63053 + pax_mirror_huge_pte(vma, address, new_page);
63054 +#endif
63055 +
63056 /* Make the old page be freed below */
63057 new_page = old_page;
63058 }
63059 @@ -2127,6 +2152,10 @@ retry:
63060 && (vma->vm_flags & VM_SHARED)));
63061 set_huge_pte_at(mm, address, ptep, new_pte);
63062
63063 +#ifdef CONFIG_PAX_SEGMEXEC
63064 + pax_mirror_huge_pte(vma, address, page);
63065 +#endif
63066 +
63067 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63068 /* Optimization, do the COW without a second fault */
63069 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63070 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63071 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63072 struct hstate *h = hstate_vma(vma);
63073
63074 +#ifdef CONFIG_PAX_SEGMEXEC
63075 + struct vm_area_struct *vma_m;
63076 +
63077 + vma_m = pax_find_mirror_vma(vma);
63078 + if (vma_m) {
63079 + unsigned long address_m;
63080 +
63081 + if (vma->vm_start > vma_m->vm_start) {
63082 + address_m = address;
63083 + address -= SEGMEXEC_TASK_SIZE;
63084 + vma = vma_m;
63085 + h = hstate_vma(vma);
63086 + } else
63087 + address_m = address + SEGMEXEC_TASK_SIZE;
63088 +
63089 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63090 + return VM_FAULT_OOM;
63091 + address_m &= HPAGE_MASK;
63092 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63093 + }
63094 +#endif
63095 +
63096 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63097 if (!ptep)
63098 return VM_FAULT_OOM;
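The SEGMEXEC hunks above keep every executable mapping twinned at address + SEGMEXEC_TASK_SIZE, and pax_mirror_huge_pte()/hugetlb_fault() normalize a fault that arrives in the upper (mirror) half back down before mirroring it; the same address_m computation recurs in the mm/memory.c hunks further down. A minimal userspace sketch of that address relationship; the SEGMEXEC_TASK_SIZE value below is only a placeholder for the real arch-specific constant:

        #include <assert.h>
        #include <stdio.h>

        /* Placeholder for the arch-defined split point, not the real value. */
        #define SEGMEXEC_TASK_SIZE 0x60000000UL

        /* Map a user address to its SEGMEXEC twin: lower-half addresses map up
         * by SEGMEXEC_TASK_SIZE, upper-half (mirror) addresses map back down,
         * mimicking the address_m computation in the hunks above. */
        static unsigned long segmexec_mirror(unsigned long address)
        {
                if (address >= SEGMEXEC_TASK_SIZE)
                        return address - SEGMEXEC_TASK_SIZE;
                return address + SEGMEXEC_TASK_SIZE;
        }

        int main(void)
        {
                unsigned long addr = 0x08048000UL;      /* a typical low text address */

                assert(segmexec_mirror(segmexec_mirror(addr)) == addr);
                printf("%#lx <-> %#lx\n", addr, segmexec_mirror(addr));
                return 0;
        }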
63099 diff -urNp linux-2.6.32.41/mm/Kconfig linux-2.6.32.41/mm/Kconfig
63100 --- linux-2.6.32.41/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63101 +++ linux-2.6.32.41/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63102 @@ -228,7 +228,7 @@ config KSM
63103 config DEFAULT_MMAP_MIN_ADDR
63104 int "Low address space to protect from user allocation"
63105 depends on MMU
63106 - default 4096
63107 + default 65536
63108 help
63109 This is the portion of low virtual memory which should be protected
63110 from userspace allocation. Keeping a user from writing to low pages
63111 diff -urNp linux-2.6.32.41/mm/kmemleak.c linux-2.6.32.41/mm/kmemleak.c
63112 --- linux-2.6.32.41/mm/kmemleak.c 2011-03-27 14:31:47.000000000 -0400
63113 +++ linux-2.6.32.41/mm/kmemleak.c 2011-04-17 15:56:46.000000000 -0400
63114 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63115
63116 for (i = 0; i < object->trace_len; i++) {
63117 void *ptr = (void *)object->trace[i];
63118 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63119 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63120 }
63121 }
63122
63123 diff -urNp linux-2.6.32.41/mm/maccess.c linux-2.6.32.41/mm/maccess.c
63124 --- linux-2.6.32.41/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63125 +++ linux-2.6.32.41/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63126 @@ -14,7 +14,7 @@
63127 * Safely read from address @src to the buffer at @dst. If a kernel fault
63128 * happens, handle that and return -EFAULT.
63129 */
63130 -long probe_kernel_read(void *dst, void *src, size_t size)
63131 +long probe_kernel_read(void *dst, const void *src, size_t size)
63132 {
63133 long ret;
63134 mm_segment_t old_fs = get_fs();
63135 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63136 * Safely write to address @dst from the buffer at @src. If a kernel fault
63137 * happens, handle that and return -EFAULT.
63138 */
63139 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63140 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63141 {
63142 long ret;
63143 mm_segment_t old_fs = get_fs();
63144 diff -urNp linux-2.6.32.41/mm/madvise.c linux-2.6.32.41/mm/madvise.c
63145 --- linux-2.6.32.41/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63146 +++ linux-2.6.32.41/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63147 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63148 pgoff_t pgoff;
63149 unsigned long new_flags = vma->vm_flags;
63150
63151 +#ifdef CONFIG_PAX_SEGMEXEC
63152 + struct vm_area_struct *vma_m;
63153 +#endif
63154 +
63155 switch (behavior) {
63156 case MADV_NORMAL:
63157 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63158 @@ -103,6 +107,13 @@ success:
63159 /*
63160 * vm_flags is protected by the mmap_sem held in write mode.
63161 */
63162 +
63163 +#ifdef CONFIG_PAX_SEGMEXEC
63164 + vma_m = pax_find_mirror_vma(vma);
63165 + if (vma_m)
63166 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63167 +#endif
63168 +
63169 vma->vm_flags = new_flags;
63170
63171 out:
63172 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63173 struct vm_area_struct ** prev,
63174 unsigned long start, unsigned long end)
63175 {
63176 +
63177 +#ifdef CONFIG_PAX_SEGMEXEC
63178 + struct vm_area_struct *vma_m;
63179 +#endif
63180 +
63181 *prev = vma;
63182 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63183 return -EINVAL;
63184 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63185 zap_page_range(vma, start, end - start, &details);
63186 } else
63187 zap_page_range(vma, start, end - start, NULL);
63188 +
63189 +#ifdef CONFIG_PAX_SEGMEXEC
63190 + vma_m = pax_find_mirror_vma(vma);
63191 + if (vma_m) {
63192 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63193 + struct zap_details details = {
63194 + .nonlinear_vma = vma_m,
63195 + .last_index = ULONG_MAX,
63196 + };
63197 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63198 + } else
63199 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63200 + }
63201 +#endif
63202 +
63203 return 0;
63204 }
63205
63206 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63207 if (end < start)
63208 goto out;
63209
63210 +#ifdef CONFIG_PAX_SEGMEXEC
63211 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63212 + if (end > SEGMEXEC_TASK_SIZE)
63213 + goto out;
63214 + } else
63215 +#endif
63216 +
63217 + if (end > TASK_SIZE)
63218 + goto out;
63219 +
63220 error = 0;
63221 if (end == start)
63222 goto out;
63223 diff -urNp linux-2.6.32.41/mm/memory.c linux-2.6.32.41/mm/memory.c
63224 --- linux-2.6.32.41/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63225 +++ linux-2.6.32.41/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63226 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63227 return;
63228
63229 pmd = pmd_offset(pud, start);
63230 +
63231 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63232 pud_clear(pud);
63233 pmd_free_tlb(tlb, pmd, start);
63234 +#endif
63235 +
63236 }
63237
63238 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63239 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63240 if (end - 1 > ceiling - 1)
63241 return;
63242
63243 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63244 pud = pud_offset(pgd, start);
63245 pgd_clear(pgd);
63246 pud_free_tlb(tlb, pud, start);
63247 +#endif
63248 +
63249 }
63250
63251 /*
63252 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63253 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63254 i = 0;
63255
63256 - do {
63257 + while (nr_pages) {
63258 struct vm_area_struct *vma;
63259
63260 - vma = find_extend_vma(mm, start);
63261 + vma = find_vma(mm, start);
63262 if (!vma && in_gate_area(tsk, start)) {
63263 unsigned long pg = start & PAGE_MASK;
63264 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63265 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63266 continue;
63267 }
63268
63269 - if (!vma ||
63270 + if (!vma || start < vma->vm_start ||
63271 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63272 !(vm_flags & vma->vm_flags))
63273 return i ? : -EFAULT;
63274 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63275 start += PAGE_SIZE;
63276 nr_pages--;
63277 } while (nr_pages && start < vma->vm_end);
63278 - } while (nr_pages);
63279 + }
63280 return i;
63281 }
63282
63283 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63284 page_add_file_rmap(page);
63285 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63286
63287 +#ifdef CONFIG_PAX_SEGMEXEC
63288 + pax_mirror_file_pte(vma, addr, page, ptl);
63289 +#endif
63290 +
63291 retval = 0;
63292 pte_unmap_unlock(pte, ptl);
63293 return retval;
63294 @@ -1560,10 +1571,22 @@ out:
63295 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63296 struct page *page)
63297 {
63298 +
63299 +#ifdef CONFIG_PAX_SEGMEXEC
63300 + struct vm_area_struct *vma_m;
63301 +#endif
63302 +
63303 if (addr < vma->vm_start || addr >= vma->vm_end)
63304 return -EFAULT;
63305 if (!page_count(page))
63306 return -EINVAL;
63307 +
63308 +#ifdef CONFIG_PAX_SEGMEXEC
63309 + vma_m = pax_find_mirror_vma(vma);
63310 + if (vma_m)
63311 + vma_m->vm_flags |= VM_INSERTPAGE;
63312 +#endif
63313 +
63314 vma->vm_flags |= VM_INSERTPAGE;
63315 return insert_page(vma, addr, page, vma->vm_page_prot);
63316 }
63317 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63318 unsigned long pfn)
63319 {
63320 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63321 + BUG_ON(vma->vm_mirror);
63322
63323 if (addr < vma->vm_start || addr >= vma->vm_end)
63324 return -EFAULT;
63325 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63326 copy_user_highpage(dst, src, va, vma);
63327 }
63328
63329 +#ifdef CONFIG_PAX_SEGMEXEC
63330 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63331 +{
63332 + struct mm_struct *mm = vma->vm_mm;
63333 + spinlock_t *ptl;
63334 + pte_t *pte, entry;
63335 +
63336 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63337 + entry = *pte;
63338 + if (!pte_present(entry)) {
63339 + if (!pte_none(entry)) {
63340 + BUG_ON(pte_file(entry));
63341 + free_swap_and_cache(pte_to_swp_entry(entry));
63342 + pte_clear_not_present_full(mm, address, pte, 0);
63343 + }
63344 + } else {
63345 + struct page *page;
63346 +
63347 + flush_cache_page(vma, address, pte_pfn(entry));
63348 + entry = ptep_clear_flush(vma, address, pte);
63349 + BUG_ON(pte_dirty(entry));
63350 + page = vm_normal_page(vma, address, entry);
63351 + if (page) {
63352 + update_hiwater_rss(mm);
63353 + if (PageAnon(page))
63354 + dec_mm_counter(mm, anon_rss);
63355 + else
63356 + dec_mm_counter(mm, file_rss);
63357 + page_remove_rmap(page);
63358 + page_cache_release(page);
63359 + }
63360 + }
63361 + pte_unmap_unlock(pte, ptl);
63362 +}
63363 +
63364 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63365 + *
63366 + * the ptl of the lower mapped page is held on entry and is not released on exit
63367 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63368 + */
63369 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63370 +{
63371 + struct mm_struct *mm = vma->vm_mm;
63372 + unsigned long address_m;
63373 + spinlock_t *ptl_m;
63374 + struct vm_area_struct *vma_m;
63375 + pmd_t *pmd_m;
63376 + pte_t *pte_m, entry_m;
63377 +
63378 + BUG_ON(!page_m || !PageAnon(page_m));
63379 +
63380 + vma_m = pax_find_mirror_vma(vma);
63381 + if (!vma_m)
63382 + return;
63383 +
63384 + BUG_ON(!PageLocked(page_m));
63385 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63386 + address_m = address + SEGMEXEC_TASK_SIZE;
63387 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63388 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63389 + ptl_m = pte_lockptr(mm, pmd_m);
63390 + if (ptl != ptl_m) {
63391 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63392 + if (!pte_none(*pte_m))
63393 + goto out;
63394 + }
63395 +
63396 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63397 + page_cache_get(page_m);
63398 + page_add_anon_rmap(page_m, vma_m, address_m);
63399 + inc_mm_counter(mm, anon_rss);
63400 + set_pte_at(mm, address_m, pte_m, entry_m);
63401 + update_mmu_cache(vma_m, address_m, entry_m);
63402 +out:
63403 + if (ptl != ptl_m)
63404 + spin_unlock(ptl_m);
63405 + pte_unmap_nested(pte_m);
63406 + unlock_page(page_m);
63407 +}
63408 +
63409 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63410 +{
63411 + struct mm_struct *mm = vma->vm_mm;
63412 + unsigned long address_m;
63413 + spinlock_t *ptl_m;
63414 + struct vm_area_struct *vma_m;
63415 + pmd_t *pmd_m;
63416 + pte_t *pte_m, entry_m;
63417 +
63418 + BUG_ON(!page_m || PageAnon(page_m));
63419 +
63420 + vma_m = pax_find_mirror_vma(vma);
63421 + if (!vma_m)
63422 + return;
63423 +
63424 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63425 + address_m = address + SEGMEXEC_TASK_SIZE;
63426 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63427 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63428 + ptl_m = pte_lockptr(mm, pmd_m);
63429 + if (ptl != ptl_m) {
63430 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63431 + if (!pte_none(*pte_m))
63432 + goto out;
63433 + }
63434 +
63435 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63436 + page_cache_get(page_m);
63437 + page_add_file_rmap(page_m);
63438 + inc_mm_counter(mm, file_rss);
63439 + set_pte_at(mm, address_m, pte_m, entry_m);
63440 + update_mmu_cache(vma_m, address_m, entry_m);
63441 +out:
63442 + if (ptl != ptl_m)
63443 + spin_unlock(ptl_m);
63444 + pte_unmap_nested(pte_m);
63445 +}
63446 +
63447 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63448 +{
63449 + struct mm_struct *mm = vma->vm_mm;
63450 + unsigned long address_m;
63451 + spinlock_t *ptl_m;
63452 + struct vm_area_struct *vma_m;
63453 + pmd_t *pmd_m;
63454 + pte_t *pte_m, entry_m;
63455 +
63456 + vma_m = pax_find_mirror_vma(vma);
63457 + if (!vma_m)
63458 + return;
63459 +
63460 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63461 + address_m = address + SEGMEXEC_TASK_SIZE;
63462 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63463 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63464 + ptl_m = pte_lockptr(mm, pmd_m);
63465 + if (ptl != ptl_m) {
63466 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63467 + if (!pte_none(*pte_m))
63468 + goto out;
63469 + }
63470 +
63471 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63472 + set_pte_at(mm, address_m, pte_m, entry_m);
63473 +out:
63474 + if (ptl != ptl_m)
63475 + spin_unlock(ptl_m);
63476 + pte_unmap_nested(pte_m);
63477 +}
63478 +
63479 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63480 +{
63481 + struct page *page_m;
63482 + pte_t entry;
63483 +
63484 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63485 + goto out;
63486 +
63487 + entry = *pte;
63488 + page_m = vm_normal_page(vma, address, entry);
63489 + if (!page_m)
63490 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63491 + else if (PageAnon(page_m)) {
63492 + if (pax_find_mirror_vma(vma)) {
63493 + pte_unmap_unlock(pte, ptl);
63494 + lock_page(page_m);
63495 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63496 + if (pte_same(entry, *pte))
63497 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63498 + else
63499 + unlock_page(page_m);
63500 + }
63501 + } else
63502 + pax_mirror_file_pte(vma, address, page_m, ptl);
63503 +
63504 +out:
63505 + pte_unmap_unlock(pte, ptl);
63506 +}
63507 +#endif
63508 +
63509 /*
63510 * This routine handles present pages, when users try to write
63511 * to a shared page. It is done by copying the page to a new address
63512 @@ -2156,6 +2360,12 @@ gotten:
63513 */
63514 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63515 if (likely(pte_same(*page_table, orig_pte))) {
63516 +
63517 +#ifdef CONFIG_PAX_SEGMEXEC
63518 + if (pax_find_mirror_vma(vma))
63519 + BUG_ON(!trylock_page(new_page));
63520 +#endif
63521 +
63522 if (old_page) {
63523 if (!PageAnon(old_page)) {
63524 dec_mm_counter(mm, file_rss);
63525 @@ -2207,6 +2417,10 @@ gotten:
63526 page_remove_rmap(old_page);
63527 }
63528
63529 +#ifdef CONFIG_PAX_SEGMEXEC
63530 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63531 +#endif
63532 +
63533 /* Free the old page.. */
63534 new_page = old_page;
63535 ret |= VM_FAULT_WRITE;
63536 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63537 swap_free(entry);
63538 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63539 try_to_free_swap(page);
63540 +
63541 +#ifdef CONFIG_PAX_SEGMEXEC
63542 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63543 +#endif
63544 +
63545 unlock_page(page);
63546
63547 if (flags & FAULT_FLAG_WRITE) {
63548 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63549
63550 /* No need to invalidate - it was non-present before */
63551 update_mmu_cache(vma, address, pte);
63552 +
63553 +#ifdef CONFIG_PAX_SEGMEXEC
63554 + pax_mirror_anon_pte(vma, address, page, ptl);
63555 +#endif
63556 +
63557 unlock:
63558 pte_unmap_unlock(page_table, ptl);
63559 out:
63560 @@ -2630,40 +2854,6 @@ out_release:
63561 }
63562
63563 /*
63564 - * This is like a special single-page "expand_{down|up}wards()",
63565 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63566 - * doesn't hit another vma.
63567 - */
63568 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63569 -{
63570 - address &= PAGE_MASK;
63571 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63572 - struct vm_area_struct *prev = vma->vm_prev;
63573 -
63574 - /*
63575 - * Is there a mapping abutting this one below?
63576 - *
63577 - * That's only ok if it's the same stack mapping
63578 - * that has gotten split..
63579 - */
63580 - if (prev && prev->vm_end == address)
63581 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63582 -
63583 - expand_stack(vma, address - PAGE_SIZE);
63584 - }
63585 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63586 - struct vm_area_struct *next = vma->vm_next;
63587 -
63588 - /* As VM_GROWSDOWN but s/below/above/ */
63589 - if (next && next->vm_start == address + PAGE_SIZE)
63590 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63591 -
63592 - expand_upwards(vma, address + PAGE_SIZE);
63593 - }
63594 - return 0;
63595 -}
63596 -
63597 -/*
63598 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63599 * but allow concurrent faults), and pte mapped but not yet locked.
63600 * We return with mmap_sem still held, but pte unmapped and unlocked.
63601 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63602 unsigned long address, pte_t *page_table, pmd_t *pmd,
63603 unsigned int flags)
63604 {
63605 - struct page *page;
63606 + struct page *page = NULL;
63607 spinlock_t *ptl;
63608 pte_t entry;
63609
63610 - pte_unmap(page_table);
63611 -
63612 - /* Check if we need to add a guard page to the stack */
63613 - if (check_stack_guard_page(vma, address) < 0)
63614 - return VM_FAULT_SIGBUS;
63615 -
63616 - /* Use the zero-page for reads */
63617 if (!(flags & FAULT_FLAG_WRITE)) {
63618 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63619 vma->vm_page_prot));
63620 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63621 + ptl = pte_lockptr(mm, pmd);
63622 + spin_lock(ptl);
63623 if (!pte_none(*page_table))
63624 goto unlock;
63625 goto setpte;
63626 }
63627
63628 /* Allocate our own private page. */
63629 + pte_unmap(page_table);
63630 +
63631 if (unlikely(anon_vma_prepare(vma)))
63632 goto oom;
63633 page = alloc_zeroed_user_highpage_movable(vma, address);
63634 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63635 if (!pte_none(*page_table))
63636 goto release;
63637
63638 +#ifdef CONFIG_PAX_SEGMEXEC
63639 + if (pax_find_mirror_vma(vma))
63640 + BUG_ON(!trylock_page(page));
63641 +#endif
63642 +
63643 inc_mm_counter(mm, anon_rss);
63644 page_add_new_anon_rmap(page, vma, address);
63645 setpte:
63646 @@ -2718,6 +2909,12 @@ setpte:
63647
63648 /* No need to invalidate - it was non-present before */
63649 update_mmu_cache(vma, address, entry);
63650 +
63651 +#ifdef CONFIG_PAX_SEGMEXEC
63652 + if (page)
63653 + pax_mirror_anon_pte(vma, address, page, ptl);
63654 +#endif
63655 +
63656 unlock:
63657 pte_unmap_unlock(page_table, ptl);
63658 return 0;
63659 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63660 */
63661 /* Only go through if we didn't race with anybody else... */
63662 if (likely(pte_same(*page_table, orig_pte))) {
63663 +
63664 +#ifdef CONFIG_PAX_SEGMEXEC
63665 + if (anon && pax_find_mirror_vma(vma))
63666 + BUG_ON(!trylock_page(page));
63667 +#endif
63668 +
63669 flush_icache_page(vma, page);
63670 entry = mk_pte(page, vma->vm_page_prot);
63671 if (flags & FAULT_FLAG_WRITE)
63672 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63673
63674 /* no need to invalidate: a not-present page won't be cached */
63675 update_mmu_cache(vma, address, entry);
63676 +
63677 +#ifdef CONFIG_PAX_SEGMEXEC
63678 + if (anon)
63679 + pax_mirror_anon_pte(vma, address, page, ptl);
63680 + else
63681 + pax_mirror_file_pte(vma, address, page, ptl);
63682 +#endif
63683 +
63684 } else {
63685 if (charged)
63686 mem_cgroup_uncharge_page(page);
63687 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63688 if (flags & FAULT_FLAG_WRITE)
63689 flush_tlb_page(vma, address);
63690 }
63691 +
63692 +#ifdef CONFIG_PAX_SEGMEXEC
63693 + pax_mirror_pte(vma, address, pte, pmd, ptl);
63694 + return 0;
63695 +#endif
63696 +
63697 unlock:
63698 pte_unmap_unlock(pte, ptl);
63699 return 0;
63700 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63701 pmd_t *pmd;
63702 pte_t *pte;
63703
63704 +#ifdef CONFIG_PAX_SEGMEXEC
63705 + struct vm_area_struct *vma_m;
63706 +#endif
63707 +
63708 __set_current_state(TASK_RUNNING);
63709
63710 count_vm_event(PGFAULT);
63711 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63712 if (unlikely(is_vm_hugetlb_page(vma)))
63713 return hugetlb_fault(mm, vma, address, flags);
63714
63715 +#ifdef CONFIG_PAX_SEGMEXEC
63716 + vma_m = pax_find_mirror_vma(vma);
63717 + if (vma_m) {
63718 + unsigned long address_m;
63719 + pgd_t *pgd_m;
63720 + pud_t *pud_m;
63721 + pmd_t *pmd_m;
63722 +
63723 + if (vma->vm_start > vma_m->vm_start) {
63724 + address_m = address;
63725 + address -= SEGMEXEC_TASK_SIZE;
63726 + vma = vma_m;
63727 + } else
63728 + address_m = address + SEGMEXEC_TASK_SIZE;
63729 +
63730 + pgd_m = pgd_offset(mm, address_m);
63731 + pud_m = pud_alloc(mm, pgd_m, address_m);
63732 + if (!pud_m)
63733 + return VM_FAULT_OOM;
63734 + pmd_m = pmd_alloc(mm, pud_m, address_m);
63735 + if (!pmd_m)
63736 + return VM_FAULT_OOM;
63737 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
63738 + return VM_FAULT_OOM;
63739 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
63740 + }
63741 +#endif
63742 +
63743 pgd = pgd_offset(mm, address);
63744 pud = pud_alloc(mm, pgd, address);
63745 if (!pud)
63746 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
63747 gate_vma.vm_start = FIXADDR_USER_START;
63748 gate_vma.vm_end = FIXADDR_USER_END;
63749 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
63750 - gate_vma.vm_page_prot = __P101;
63751 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
63752 /*
63753 * Make sure the vDSO gets into every core dump.
63754 * Dumping its contents makes post-mortem fully interpretable later
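pax_mirror_anon_pte(), pax_mirror_file_pte() and pax_mirror_pfn_pte() in the mm/memory.c hunks above all share one locking shape: the caller already holds the ptl of the faulting PTE, and the mirror's ptl is taken only when it is a different lock, since with split page-table locks the two PTEs may or may not live under the same lock. A rough userspace model of that "take the second lock only if distinct" pattern, using pthread mutexes purely for illustration:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t ptl_low = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t ptl_high = PTHREAD_MUTEX_INITIALIZER;

        /* Caller holds 'held'; update both entries, taking 'mirror' only when
         * it is a distinct lock, as the ptl != ptl_m checks above do. */
        static void update_both_ptes(pthread_mutex_t *held, pthread_mutex_t *mirror)
        {
                if (mirror != held)
                        pthread_mutex_lock(mirror);

                /* ... both PTEs can be modified consistently here ... */
                puts(mirror != held ? "two locks held" : "one shared lock held");

                if (mirror != held)
                        pthread_mutex_unlock(mirror);
        }

        int main(void)
        {
                pthread_mutex_lock(&ptl_low);
                update_both_ptes(&ptl_low, &ptl_high);  /* PTEs in different tables */
                update_both_ptes(&ptl_low, &ptl_low);   /* PTEs share one table */
                pthread_mutex_unlock(&ptl_low);
                return 0;
        }

(Link with -lpthread. The kernel additionally takes the second lock with spin_lock_nested(..., SINGLE_DEPTH_NESTING), which has no direct userspace analogue in this sketch.)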
63755 diff -urNp linux-2.6.32.41/mm/memory-failure.c linux-2.6.32.41/mm/memory-failure.c
63756 --- linux-2.6.32.41/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
63757 +++ linux-2.6.32.41/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
63758 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
63759
63760 int sysctl_memory_failure_recovery __read_mostly = 1;
63761
63762 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63763 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63764
63765 /*
63766 * Send all the processes who have the page mapped an ``action optional''
63767 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
63768 return 0;
63769 }
63770
63771 - atomic_long_add(1, &mce_bad_pages);
63772 + atomic_long_add_unchecked(1, &mce_bad_pages);
63773
63774 /*
63775 * We need/can do nothing about count=0 pages.
63776 diff -urNp linux-2.6.32.41/mm/mempolicy.c linux-2.6.32.41/mm/mempolicy.c
63777 --- linux-2.6.32.41/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
63778 +++ linux-2.6.32.41/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
63779 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
63780 struct vm_area_struct *next;
63781 int err;
63782
63783 +#ifdef CONFIG_PAX_SEGMEXEC
63784 + struct vm_area_struct *vma_m;
63785 +#endif
63786 +
63787 err = 0;
63788 for (; vma && vma->vm_start < end; vma = next) {
63789 next = vma->vm_next;
63790 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
63791 err = policy_vma(vma, new);
63792 if (err)
63793 break;
63794 +
63795 +#ifdef CONFIG_PAX_SEGMEXEC
63796 + vma_m = pax_find_mirror_vma(vma);
63797 + if (vma_m) {
63798 + err = policy_vma(vma_m, new);
63799 + if (err)
63800 + break;
63801 + }
63802 +#endif
63803 +
63804 }
63805 return err;
63806 }
63807 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
63808
63809 if (end < start)
63810 return -EINVAL;
63811 +
63812 +#ifdef CONFIG_PAX_SEGMEXEC
63813 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
63814 + if (end > SEGMEXEC_TASK_SIZE)
63815 + return -EINVAL;
63816 + } else
63817 +#endif
63818 +
63819 + if (end > TASK_SIZE)
63820 + return -EINVAL;
63821 +
63822 if (end == start)
63823 return 0;
63824
63825 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63826 if (!mm)
63827 return -EINVAL;
63828
63829 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63830 + if (mm != current->mm &&
63831 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63832 + err = -EPERM;
63833 + goto out;
63834 + }
63835 +#endif
63836 +
63837 /*
63838 * Check if this process has the right to modify the specified
63839 * process. The right exists if the process has administrative
63840 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63841 rcu_read_lock();
63842 tcred = __task_cred(task);
63843 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63844 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
63845 - !capable(CAP_SYS_NICE)) {
63846 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63847 rcu_read_unlock();
63848 err = -EPERM;
63849 goto out;
63850 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
63851
63852 if (file) {
63853 seq_printf(m, " file=");
63854 - seq_path(m, &file->f_path, "\n\t= ");
63855 + seq_path(m, &file->f_path, "\n\t\\= ");
63856 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63857 seq_printf(m, " heap");
63858 } else if (vma->vm_start <= mm->start_stack &&
63859 diff -urNp linux-2.6.32.41/mm/migrate.c linux-2.6.32.41/mm/migrate.c
63860 --- linux-2.6.32.41/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
63861 +++ linux-2.6.32.41/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
63862 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
63863 unsigned long chunk_start;
63864 int err;
63865
63866 + pax_track_stack();
63867 +
63868 task_nodes = cpuset_mems_allowed(task);
63869
63870 err = -ENOMEM;
63871 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63872 if (!mm)
63873 return -EINVAL;
63874
63875 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63876 + if (mm != current->mm &&
63877 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63878 + err = -EPERM;
63879 + goto out;
63880 + }
63881 +#endif
63882 +
63883 /*
63884 * Check if this process has the right to modify the specified
63885 * process. The right exists if the process has administrative
63886 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63887 rcu_read_lock();
63888 tcred = __task_cred(task);
63889 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63890 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
63891 - !capable(CAP_SYS_NICE)) {
63892 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63893 rcu_read_unlock();
63894 err = -EPERM;
63895 goto out;
63896 diff -urNp linux-2.6.32.41/mm/mlock.c linux-2.6.32.41/mm/mlock.c
63897 --- linux-2.6.32.41/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
63898 +++ linux-2.6.32.41/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
63899 @@ -13,6 +13,7 @@
63900 #include <linux/pagemap.h>
63901 #include <linux/mempolicy.h>
63902 #include <linux/syscalls.h>
63903 +#include <linux/security.h>
63904 #include <linux/sched.h>
63905 #include <linux/module.h>
63906 #include <linux/rmap.h>
63907 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
63908 }
63909 }
63910
63911 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63912 -{
63913 - return (vma->vm_flags & VM_GROWSDOWN) &&
63914 - (vma->vm_start == addr) &&
63915 - !vma_stack_continue(vma->vm_prev, addr);
63916 -}
63917 -
63918 /**
63919 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
63920 * @vma: target vma
63921 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
63922 if (vma->vm_flags & VM_WRITE)
63923 gup_flags |= FOLL_WRITE;
63924
63925 - /* We don't try to access the guard page of a stack vma */
63926 - if (stack_guard_page(vma, start)) {
63927 - addr += PAGE_SIZE;
63928 - nr_pages--;
63929 - }
63930 -
63931 while (nr_pages > 0) {
63932 int i;
63933
63934 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
63935 {
63936 unsigned long nstart, end, tmp;
63937 struct vm_area_struct * vma, * prev;
63938 - int error;
63939 + int error = -EINVAL;
63940
63941 len = PAGE_ALIGN(len);
63942 end = start + len;
63943 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
63944 return -EINVAL;
63945 if (end == start)
63946 return 0;
63947 + if (end > TASK_SIZE)
63948 + return -EINVAL;
63949 +
63950 vma = find_vma_prev(current->mm, start, &prev);
63951 if (!vma || vma->vm_start > start)
63952 return -ENOMEM;
63953 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
63954 for (nstart = start ; ; ) {
63955 unsigned int newflags;
63956
63957 +#ifdef CONFIG_PAX_SEGMEXEC
63958 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
63959 + break;
63960 +#endif
63961 +
63962 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
63963
63964 newflags = vma->vm_flags | VM_LOCKED;
63965 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
63966 lock_limit >>= PAGE_SHIFT;
63967
63968 /* check against resource limits */
63969 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
63970 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
63971 error = do_mlock(start, len, 1);
63972 up_write(&current->mm->mmap_sem);
63973 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
63974 static int do_mlockall(int flags)
63975 {
63976 struct vm_area_struct * vma, * prev = NULL;
63977 - unsigned int def_flags = 0;
63978
63979 if (flags & MCL_FUTURE)
63980 - def_flags = VM_LOCKED;
63981 - current->mm->def_flags = def_flags;
63982 + current->mm->def_flags |= VM_LOCKED;
63983 + else
63984 + current->mm->def_flags &= ~VM_LOCKED;
63985 if (flags == MCL_FUTURE)
63986 goto out;
63987
63988 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
63989 - unsigned int newflags;
63990 + unsigned long newflags;
63991 +
63992 +#ifdef CONFIG_PAX_SEGMEXEC
63993 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
63994 + break;
63995 +#endif
63996
63997 + BUG_ON(vma->vm_end > TASK_SIZE);
63998 newflags = vma->vm_flags | VM_LOCKED;
63999 if (!(flags & MCL_CURRENT))
64000 newflags &= ~VM_LOCKED;
64001 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64002 lock_limit >>= PAGE_SHIFT;
64003
64004 ret = -ENOMEM;
64005 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64006 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64007 capable(CAP_IPC_LOCK))
64008 ret = do_mlockall(flags);
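In the do_mlockall() hunk above, the patch stops rebuilding mm->def_flags from scratch and instead sets or clears only the VM_LOCKED bit, so any other default flags survive an mlockall()/munlockall() cycle. A small sketch of that bit-preserving update; the flag values are illustrative and the second flag is a made-up stand-in for "whatever else happens to be set":

        #include <stdio.h>

        #define VM_LOCKED       0x00002000UL    /* illustrative bit values */
        #define VM_SOMETHING    0x00000100UL    /* stand-in for another def_flags bit */

        /* Toggle only VM_LOCKED, as the patched do_mlockall() does, rather
         * than assigning def_flags wholesale. */
        static unsigned long update_def_flags(unsigned long def_flags, int mcl_future)
        {
                if (mcl_future)
                        return def_flags | VM_LOCKED;
                return def_flags & ~VM_LOCKED;
        }

        int main(void)
        {
                unsigned long flags = VM_SOMETHING;

                flags = update_def_flags(flags, 1);
                printf("after MCL_FUTURE set:   %#lx\n", flags);  /* other bit intact */
                flags = update_def_flags(flags, 0);
                printf("after MCL_FUTURE clear: %#lx\n", flags);
                return 0;
        }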
64009 diff -urNp linux-2.6.32.41/mm/mmap.c linux-2.6.32.41/mm/mmap.c
64010 --- linux-2.6.32.41/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64011 +++ linux-2.6.32.41/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64012 @@ -45,6 +45,16 @@
64013 #define arch_rebalance_pgtables(addr, len) (addr)
64014 #endif
64015
64016 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64017 +{
64018 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64019 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64020 + up_read(&mm->mmap_sem);
64021 + BUG();
64022 + }
64023 +#endif
64024 +}
64025 +
64026 static void unmap_region(struct mm_struct *mm,
64027 struct vm_area_struct *vma, struct vm_area_struct *prev,
64028 unsigned long start, unsigned long end);
64029 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64030 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64031 *
64032 */
64033 -pgprot_t protection_map[16] = {
64034 +pgprot_t protection_map[16] __read_only = {
64035 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64036 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64037 };
64038
64039 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64040 {
64041 - return __pgprot(pgprot_val(protection_map[vm_flags &
64042 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64043 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64044 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64045 +
64046 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64047 + if (!nx_enabled &&
64048 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64049 + (vm_flags & (VM_READ | VM_WRITE)))
64050 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64051 +#endif
64052 +
64053 + return prot;
64054 }
64055 EXPORT_SYMBOL(vm_get_page_prot);
64056
64057 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64058 int sysctl_overcommit_ratio = 50; /* default is 50% */
64059 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64060 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64061 struct percpu_counter vm_committed_as;
64062
64063 /*
64064 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64065 struct vm_area_struct *next = vma->vm_next;
64066
64067 might_sleep();
64068 + BUG_ON(vma->vm_mirror);
64069 if (vma->vm_ops && vma->vm_ops->close)
64070 vma->vm_ops->close(vma);
64071 if (vma->vm_file) {
64072 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64073 * not page aligned -Ram Gupta
64074 */
64075 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64076 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64077 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64078 (mm->end_data - mm->start_data) > rlim)
64079 goto out;
64080 @@ -704,6 +726,12 @@ static int
64081 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64082 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64083 {
64084 +
64085 +#ifdef CONFIG_PAX_SEGMEXEC
64086 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64087 + return 0;
64088 +#endif
64089 +
64090 if (is_mergeable_vma(vma, file, vm_flags) &&
64091 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64092 if (vma->vm_pgoff == vm_pgoff)
64093 @@ -723,6 +751,12 @@ static int
64094 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64095 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64096 {
64097 +
64098 +#ifdef CONFIG_PAX_SEGMEXEC
64099 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64100 + return 0;
64101 +#endif
64102 +
64103 if (is_mergeable_vma(vma, file, vm_flags) &&
64104 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64105 pgoff_t vm_pglen;
64106 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64107 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64108 struct vm_area_struct *prev, unsigned long addr,
64109 unsigned long end, unsigned long vm_flags,
64110 - struct anon_vma *anon_vma, struct file *file,
64111 + struct anon_vma *anon_vma, struct file *file,
64112 pgoff_t pgoff, struct mempolicy *policy)
64113 {
64114 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64115 struct vm_area_struct *area, *next;
64116
64117 +#ifdef CONFIG_PAX_SEGMEXEC
64118 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64119 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64120 +
64121 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64122 +#endif
64123 +
64124 /*
64125 * We later require that vma->vm_flags == vm_flags,
64126 * so this tests vma->vm_flags & VM_SPECIAL, too.
64127 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64128 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64129 next = next->vm_next;
64130
64131 +#ifdef CONFIG_PAX_SEGMEXEC
64132 + if (prev)
64133 + prev_m = pax_find_mirror_vma(prev);
64134 + if (area)
64135 + area_m = pax_find_mirror_vma(area);
64136 + if (next)
64137 + next_m = pax_find_mirror_vma(next);
64138 +#endif
64139 +
64140 /*
64141 * Can it merge with the predecessor?
64142 */
64143 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64144 /* cases 1, 6 */
64145 vma_adjust(prev, prev->vm_start,
64146 next->vm_end, prev->vm_pgoff, NULL);
64147 - } else /* cases 2, 5, 7 */
64148 +
64149 +#ifdef CONFIG_PAX_SEGMEXEC
64150 + if (prev_m)
64151 + vma_adjust(prev_m, prev_m->vm_start,
64152 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64153 +#endif
64154 +
64155 + } else { /* cases 2, 5, 7 */
64156 vma_adjust(prev, prev->vm_start,
64157 end, prev->vm_pgoff, NULL);
64158 +
64159 +#ifdef CONFIG_PAX_SEGMEXEC
64160 + if (prev_m)
64161 + vma_adjust(prev_m, prev_m->vm_start,
64162 + end_m, prev_m->vm_pgoff, NULL);
64163 +#endif
64164 +
64165 + }
64166 return prev;
64167 }
64168
64169 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64170 mpol_equal(policy, vma_policy(next)) &&
64171 can_vma_merge_before(next, vm_flags,
64172 anon_vma, file, pgoff+pglen)) {
64173 - if (prev && addr < prev->vm_end) /* case 4 */
64174 + if (prev && addr < prev->vm_end) { /* case 4 */
64175 vma_adjust(prev, prev->vm_start,
64176 addr, prev->vm_pgoff, NULL);
64177 - else /* cases 3, 8 */
64178 +
64179 +#ifdef CONFIG_PAX_SEGMEXEC
64180 + if (prev_m)
64181 + vma_adjust(prev_m, prev_m->vm_start,
64182 + addr_m, prev_m->vm_pgoff, NULL);
64183 +#endif
64184 +
64185 + } else { /* cases 3, 8 */
64186 vma_adjust(area, addr, next->vm_end,
64187 next->vm_pgoff - pglen, NULL);
64188 +
64189 +#ifdef CONFIG_PAX_SEGMEXEC
64190 + if (area_m)
64191 + vma_adjust(area_m, addr_m, next_m->vm_end,
64192 + next_m->vm_pgoff - pglen, NULL);
64193 +#endif
64194 +
64195 + }
64196 return area;
64197 }
64198
64199 @@ -898,14 +978,11 @@ none:
64200 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64201 struct file *file, long pages)
64202 {
64203 - const unsigned long stack_flags
64204 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64205 -
64206 if (file) {
64207 mm->shared_vm += pages;
64208 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64209 mm->exec_vm += pages;
64210 - } else if (flags & stack_flags)
64211 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64212 mm->stack_vm += pages;
64213 if (flags & (VM_RESERVED|VM_IO))
64214 mm->reserved_vm += pages;
64215 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64216 * (the exception is when the underlying filesystem is noexec
64217 * mounted, in which case we dont add PROT_EXEC.)
64218 */
64219 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64220 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64221 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64222 prot |= PROT_EXEC;
64223
64224 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64225 /* Obtain the address to map to. we verify (or select) it and ensure
64226 * that it represents a valid section of the address space.
64227 */
64228 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64229 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64230 if (addr & ~PAGE_MASK)
64231 return addr;
64232
64233 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64234 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64235 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64236
64237 +#ifdef CONFIG_PAX_MPROTECT
64238 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64239 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64240 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64241 + gr_log_rwxmmap(file);
64242 +
64243 +#ifdef CONFIG_PAX_EMUPLT
64244 + vm_flags &= ~VM_EXEC;
64245 +#else
64246 + return -EPERM;
64247 +#endif
64248 +
64249 + }
64250 +
64251 + if (!(vm_flags & VM_EXEC))
64252 + vm_flags &= ~VM_MAYEXEC;
64253 +#else
64254 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64255 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64256 +#endif
64257 + else
64258 + vm_flags &= ~VM_MAYWRITE;
64259 + }
64260 +#endif
64261 +
64262 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64263 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64264 + vm_flags &= ~VM_PAGEEXEC;
64265 +#endif
64266 +
64267 if (flags & MAP_LOCKED)
64268 if (!can_do_mlock())
64269 return -EPERM;
64270 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64271 locked += mm->locked_vm;
64272 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64273 lock_limit >>= PAGE_SHIFT;
64274 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64275 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64276 return -EAGAIN;
64277 }
64278 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64279 if (error)
64280 return error;
64281
64282 + if (!gr_acl_handle_mmap(file, prot))
64283 + return -EACCES;
64284 +
64285 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64286 }
64287 EXPORT_SYMBOL(do_mmap_pgoff);
64288 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64289 */
64290 int vma_wants_writenotify(struct vm_area_struct *vma)
64291 {
64292 - unsigned int vm_flags = vma->vm_flags;
64293 + unsigned long vm_flags = vma->vm_flags;
64294
64295 /* If it was private or non-writable, the write bit is already clear */
64296 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64297 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64298 return 0;
64299
64300 /* The backer wishes to know when pages are first written to? */
64301 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64302 unsigned long charged = 0;
64303 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64304
64305 +#ifdef CONFIG_PAX_SEGMEXEC
64306 + struct vm_area_struct *vma_m = NULL;
64307 +#endif
64308 +
64309 + /*
64310 + * mm->mmap_sem is required to protect against another thread
64311 + * changing the mappings in case we sleep.
64312 + */
64313 + verify_mm_writelocked(mm);
64314 +
64315 /* Clear old maps */
64316 error = -ENOMEM;
64317 -munmap_back:
64318 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64319 if (vma && vma->vm_start < addr + len) {
64320 if (do_munmap(mm, addr, len))
64321 return -ENOMEM;
64322 - goto munmap_back;
64323 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64324 + BUG_ON(vma && vma->vm_start < addr + len);
64325 }
64326
64327 /* Check against address space limit. */
64328 @@ -1173,6 +1294,16 @@ munmap_back:
64329 goto unacct_error;
64330 }
64331
64332 +#ifdef CONFIG_PAX_SEGMEXEC
64333 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64334 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64335 + if (!vma_m) {
64336 + error = -ENOMEM;
64337 + goto free_vma;
64338 + }
64339 + }
64340 +#endif
64341 +
64342 vma->vm_mm = mm;
64343 vma->vm_start = addr;
64344 vma->vm_end = addr + len;
64345 @@ -1195,6 +1326,19 @@ munmap_back:
64346 error = file->f_op->mmap(file, vma);
64347 if (error)
64348 goto unmap_and_free_vma;
64349 +
64350 +#ifdef CONFIG_PAX_SEGMEXEC
64351 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64352 + added_exe_file_vma(mm);
64353 +#endif
64354 +
64355 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64356 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64357 + vma->vm_flags |= VM_PAGEEXEC;
64358 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64359 + }
64360 +#endif
64361 +
64362 if (vm_flags & VM_EXECUTABLE)
64363 added_exe_file_vma(mm);
64364
64365 @@ -1218,6 +1362,11 @@ munmap_back:
64366 vma_link(mm, vma, prev, rb_link, rb_parent);
64367 file = vma->vm_file;
64368
64369 +#ifdef CONFIG_PAX_SEGMEXEC
64370 + if (vma_m)
64371 + pax_mirror_vma(vma_m, vma);
64372 +#endif
64373 +
64374 /* Once vma denies write, undo our temporary denial count */
64375 if (correct_wcount)
64376 atomic_inc(&inode->i_writecount);
64377 @@ -1226,6 +1375,7 @@ out:
64378
64379 mm->total_vm += len >> PAGE_SHIFT;
64380 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64381 + track_exec_limit(mm, addr, addr + len, vm_flags);
64382 if (vm_flags & VM_LOCKED) {
64383 /*
64384 * makes pages present; downgrades, drops, reacquires mmap_sem
64385 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64386 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64387 charged = 0;
64388 free_vma:
64389 +
64390 +#ifdef CONFIG_PAX_SEGMEXEC
64391 + if (vma_m)
64392 + kmem_cache_free(vm_area_cachep, vma_m);
64393 +#endif
64394 +
64395 kmem_cache_free(vm_area_cachep, vma);
64396 unacct_error:
64397 if (charged)
64398 @@ -1255,6 +1411,44 @@ unacct_error:
64399 return error;
64400 }
64401
64402 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64403 +{
64404 + if (!vma) {
64405 +#ifdef CONFIG_STACK_GROWSUP
64406 + if (addr > sysctl_heap_stack_gap)
64407 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64408 + else
64409 + vma = find_vma(current->mm, 0);
64410 + if (vma && (vma->vm_flags & VM_GROWSUP))
64411 + return false;
64412 +#endif
64413 + return true;
64414 + }
64415 +
64416 + if (addr + len > vma->vm_start)
64417 + return false;
64418 +
64419 + if (vma->vm_flags & VM_GROWSDOWN)
64420 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64421 +#ifdef CONFIG_STACK_GROWSUP
64422 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64423 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64424 +#endif
64425 +
64426 + return true;
64427 +}
64428 +
64429 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64430 +{
64431 + if (vma->vm_start < len)
64432 + return -ENOMEM;
64433 + if (!(vma->vm_flags & VM_GROWSDOWN))
64434 + return vma->vm_start - len;
64435 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64436 + return vma->vm_start - len - sysctl_heap_stack_gap;
64437 + return -ENOMEM;
64438 +}
64439 +
64440 /* Get an address range which is currently unmapped.
64441 * For shmat() with addr=0.
64442 *
64443 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64444 if (flags & MAP_FIXED)
64445 return addr;
64446
64447 +#ifdef CONFIG_PAX_RANDMMAP
64448 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64449 +#endif
64450 +
64451 if (addr) {
64452 addr = PAGE_ALIGN(addr);
64453 - vma = find_vma(mm, addr);
64454 - if (TASK_SIZE - len >= addr &&
64455 - (!vma || addr + len <= vma->vm_start))
64456 - return addr;
64457 + if (TASK_SIZE - len >= addr) {
64458 + vma = find_vma(mm, addr);
64459 + if (check_heap_stack_gap(vma, addr, len))
64460 + return addr;
64461 + }
64462 }
64463 if (len > mm->cached_hole_size) {
64464 - start_addr = addr = mm->free_area_cache;
64465 + start_addr = addr = mm->free_area_cache;
64466 } else {
64467 - start_addr = addr = TASK_UNMAPPED_BASE;
64468 - mm->cached_hole_size = 0;
64469 + start_addr = addr = mm->mmap_base;
64470 + mm->cached_hole_size = 0;
64471 }
64472
64473 full_search:
64474 @@ -1303,34 +1502,40 @@ full_search:
64475 * Start a new search - just in case we missed
64476 * some holes.
64477 */
64478 - if (start_addr != TASK_UNMAPPED_BASE) {
64479 - addr = TASK_UNMAPPED_BASE;
64480 - start_addr = addr;
64481 + if (start_addr != mm->mmap_base) {
64482 + start_addr = addr = mm->mmap_base;
64483 mm->cached_hole_size = 0;
64484 goto full_search;
64485 }
64486 return -ENOMEM;
64487 }
64488 - if (!vma || addr + len <= vma->vm_start) {
64489 - /*
64490 - * Remember the place where we stopped the search:
64491 - */
64492 - mm->free_area_cache = addr + len;
64493 - return addr;
64494 - }
64495 + if (check_heap_stack_gap(vma, addr, len))
64496 + break;
64497 if (addr + mm->cached_hole_size < vma->vm_start)
64498 mm->cached_hole_size = vma->vm_start - addr;
64499 addr = vma->vm_end;
64500 }
64501 +
64502 + /*
64503 + * Remember the place where we stopped the search:
64504 + */
64505 + mm->free_area_cache = addr + len;
64506 + return addr;
64507 }
64508 #endif
64509
64510 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64511 {
64512 +
64513 +#ifdef CONFIG_PAX_SEGMEXEC
64514 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64515 + return;
64516 +#endif
64517 +
64518 /*
64519 * Is this a new hole at the lowest possible address?
64520 */
64521 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64522 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64523 mm->free_area_cache = addr;
64524 mm->cached_hole_size = ~0UL;
64525 }
64526 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64527 {
64528 struct vm_area_struct *vma;
64529 struct mm_struct *mm = current->mm;
64530 - unsigned long addr = addr0;
64531 + unsigned long base = mm->mmap_base, addr = addr0;
64532
64533 /* requested length too big for entire address space */
64534 if (len > TASK_SIZE)
64535 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64536 if (flags & MAP_FIXED)
64537 return addr;
64538
64539 +#ifdef CONFIG_PAX_RANDMMAP
64540 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64541 +#endif
64542 +
64543 /* requesting a specific address */
64544 if (addr) {
64545 addr = PAGE_ALIGN(addr);
64546 - vma = find_vma(mm, addr);
64547 - if (TASK_SIZE - len >= addr &&
64548 - (!vma || addr + len <= vma->vm_start))
64549 - return addr;
64550 + if (TASK_SIZE - len >= addr) {
64551 + vma = find_vma(mm, addr);
64552 + if (check_heap_stack_gap(vma, addr, len))
64553 + return addr;
64554 + }
64555 }
64556
64557 /* check if free_area_cache is useful for us */
64558 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64559 /* make sure it can fit in the remaining address space */
64560 if (addr > len) {
64561 vma = find_vma(mm, addr-len);
64562 - if (!vma || addr <= vma->vm_start)
64563 + if (check_heap_stack_gap(vma, addr - len, len))
64564 /* remember the address as a hint for next time */
64565 return (mm->free_area_cache = addr-len);
64566 }
64567 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64568 * return with success:
64569 */
64570 vma = find_vma(mm, addr);
64571 - if (!vma || addr+len <= vma->vm_start)
64572 + if (check_heap_stack_gap(vma, addr, len))
64573 /* remember the address as a hint for next time */
64574 return (mm->free_area_cache = addr);
64575
64576 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64577 mm->cached_hole_size = vma->vm_start - addr;
64578
64579 /* try just below the current vma->vm_start */
64580 - addr = vma->vm_start-len;
64581 - } while (len < vma->vm_start);
64582 + addr = skip_heap_stack_gap(vma, len);
64583 + } while (!IS_ERR_VALUE(addr));
64584
64585 bottomup:
64586 /*
64587 @@ -1414,13 +1624,21 @@ bottomup:
64588 * can happen with large stack limits and large mmap()
64589 * allocations.
64590 */
64591 + mm->mmap_base = TASK_UNMAPPED_BASE;
64592 +
64593 +#ifdef CONFIG_PAX_RANDMMAP
64594 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64595 + mm->mmap_base += mm->delta_mmap;
64596 +#endif
64597 +
64598 + mm->free_area_cache = mm->mmap_base;
64599 mm->cached_hole_size = ~0UL;
64600 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64601 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64602 /*
64603 * Restore the topdown base:
64604 */
64605 - mm->free_area_cache = mm->mmap_base;
64606 + mm->mmap_base = base;
64607 + mm->free_area_cache = base;
64608 mm->cached_hole_size = ~0UL;
64609
64610 return addr;
64611 @@ -1429,6 +1647,12 @@ bottomup:
64612
64613 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64614 {
64615 +
64616 +#ifdef CONFIG_PAX_SEGMEXEC
64617 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64618 + return;
64619 +#endif
64620 +
64621 /*
64622 * Is this a new hole at the highest possible address?
64623 */
64624 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64625 mm->free_area_cache = addr;
64626
64627 /* dont allow allocations above current base */
64628 - if (mm->free_area_cache > mm->mmap_base)
64629 + if (mm->free_area_cache > mm->mmap_base) {
64630 mm->free_area_cache = mm->mmap_base;
64631 + mm->cached_hole_size = ~0UL;
64632 + }
64633 }
64634
64635 unsigned long
64636 @@ -1545,6 +1771,27 @@ out:
64637 return prev ? prev->vm_next : vma;
64638 }
64639
64640 +#ifdef CONFIG_PAX_SEGMEXEC
64641 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64642 +{
64643 + struct vm_area_struct *vma_m;
64644 +
64645 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64646 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64647 + BUG_ON(vma->vm_mirror);
64648 + return NULL;
64649 + }
64650 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64651 + vma_m = vma->vm_mirror;
64652 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64653 + BUG_ON(vma->vm_file != vma_m->vm_file);
64654 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64655 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64656 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64657 + return vma_m;
64658 +}
64659 +#endif
64660 +
64661 /*
64662 * Verify that the stack growth is acceptable and
64663 * update accounting. This is shared with both the
64664 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64665 return -ENOMEM;
64666
64667 /* Stack limit test */
64668 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
64669 if (size > rlim[RLIMIT_STACK].rlim_cur)
64670 return -ENOMEM;
64671
64672 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64673 unsigned long limit;
64674 locked = mm->locked_vm + grow;
64675 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64676 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64677 if (locked > limit && !capable(CAP_IPC_LOCK))
64678 return -ENOMEM;
64679 }
64680 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64681 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64682 * vma is the last one with address > vma->vm_end. Have to extend vma.
64683 */
64684 +#ifndef CONFIG_IA64
64685 +static
64686 +#endif
64687 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64688 {
64689 int error;
64690 + bool locknext;
64691
64692 if (!(vma->vm_flags & VM_GROWSUP))
64693 return -EFAULT;
64694
64695 + /* Also guard against wrapping around to address 0. */
64696 + if (address < PAGE_ALIGN(address+1))
64697 + address = PAGE_ALIGN(address+1);
64698 + else
64699 + return -ENOMEM;
64700 +
64701 /*
64702 * We must make sure the anon_vma is allocated
64703 * so that the anon_vma locking is not a noop.
64704 */
64705 if (unlikely(anon_vma_prepare(vma)))
64706 return -ENOMEM;
64707 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64708 + if (locknext && anon_vma_prepare(vma->vm_next))
64709 + return -ENOMEM;
64710 anon_vma_lock(vma);
64711 + if (locknext)
64712 + anon_vma_lock(vma->vm_next);
64713
64714 /*
64715 * vma->vm_start/vm_end cannot change under us because the caller
64716 * is required to hold the mmap_sem in read mode. We need the
64717 - * anon_vma lock to serialize against concurrent expand_stacks.
64718 - * Also guard against wrapping around to address 0.
64719 + * anon_vma locks to serialize against concurrent expand_stacks
64720 + * and expand_upwards.
64721 */
64722 - if (address < PAGE_ALIGN(address+4))
64723 - address = PAGE_ALIGN(address+4);
64724 - else {
64725 - anon_vma_unlock(vma);
64726 - return -ENOMEM;
64727 - }
64728 error = 0;
64729
64730 /* Somebody else might have raced and expanded it already */
64731 - if (address > vma->vm_end) {
64732 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64733 + error = -ENOMEM;
64734 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
64735 unsigned long size, grow;
64736
64737 size = address - vma->vm_start;
64738 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
64739 if (!error)
64740 vma->vm_end = address;
64741 }
64742 + if (locknext)
64743 + anon_vma_unlock(vma->vm_next);
64744 anon_vma_unlock(vma);
64745 return error;
64746 }
64747 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
64748 unsigned long address)
64749 {
64750 int error;
64751 + bool lockprev = false;
64752 + struct vm_area_struct *prev;
64753
64754 /*
64755 * We must make sure the anon_vma is allocated
64756 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
64757 if (error)
64758 return error;
64759
64760 + prev = vma->vm_prev;
64761 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
64762 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
64763 +#endif
64764 + if (lockprev && anon_vma_prepare(prev))
64765 + return -ENOMEM;
64766 + if (lockprev)
64767 + anon_vma_lock(prev);
64768 +
64769 anon_vma_lock(vma);
64770
64771 /*
64772 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
64773 */
64774
64775 /* Somebody else might have raced and expanded it already */
64776 - if (address < vma->vm_start) {
64777 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
64778 + error = -ENOMEM;
64779 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
64780 unsigned long size, grow;
64781
64782 +#ifdef CONFIG_PAX_SEGMEXEC
64783 + struct vm_area_struct *vma_m;
64784 +
64785 + vma_m = pax_find_mirror_vma(vma);
64786 +#endif
64787 +
64788 size = vma->vm_end - address;
64789 grow = (vma->vm_start - address) >> PAGE_SHIFT;
64790
64791 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
64792 if (!error) {
64793 vma->vm_start = address;
64794 vma->vm_pgoff -= grow;
64795 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
64796 +
64797 +#ifdef CONFIG_PAX_SEGMEXEC
64798 + if (vma_m) {
64799 + vma_m->vm_start -= grow << PAGE_SHIFT;
64800 + vma_m->vm_pgoff -= grow;
64801 + }
64802 +#endif
64803 +
64804 }
64805 }
64806 anon_vma_unlock(vma);
64807 + if (lockprev)
64808 + anon_vma_unlock(prev);
64809 return error;
64810 }
64811
64812 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
64813 do {
64814 long nrpages = vma_pages(vma);
64815
64816 +#ifdef CONFIG_PAX_SEGMEXEC
64817 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
64818 + vma = remove_vma(vma);
64819 + continue;
64820 + }
64821 +#endif
64822 +
64823 mm->total_vm -= nrpages;
64824 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
64825 vma = remove_vma(vma);
64826 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
64827 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
64828 vma->vm_prev = NULL;
64829 do {
64830 +
64831 +#ifdef CONFIG_PAX_SEGMEXEC
64832 + if (vma->vm_mirror) {
64833 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
64834 + vma->vm_mirror->vm_mirror = NULL;
64835 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
64836 + vma->vm_mirror = NULL;
64837 + }
64838 +#endif
64839 +
64840 rb_erase(&vma->vm_rb, &mm->mm_rb);
64841 mm->map_count--;
64842 tail_vma = vma;
64843 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
64844 struct mempolicy *pol;
64845 struct vm_area_struct *new;
64846
64847 +#ifdef CONFIG_PAX_SEGMEXEC
64848 + struct vm_area_struct *vma_m, *new_m = NULL;
64849 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
64850 +#endif
64851 +
64852 if (is_vm_hugetlb_page(vma) && (addr &
64853 ~(huge_page_mask(hstate_vma(vma)))))
64854 return -EINVAL;
64855
64856 +#ifdef CONFIG_PAX_SEGMEXEC
64857 + vma_m = pax_find_mirror_vma(vma);
64858 +
64859 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64860 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
64861 + if (mm->map_count >= sysctl_max_map_count-1)
64862 + return -ENOMEM;
64863 + } else
64864 +#endif
64865 +
64866 if (mm->map_count >= sysctl_max_map_count)
64867 return -ENOMEM;
64868
64869 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
64870 if (!new)
64871 return -ENOMEM;
64872
64873 +#ifdef CONFIG_PAX_SEGMEXEC
64874 + if (vma_m) {
64875 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64876 + if (!new_m) {
64877 + kmem_cache_free(vm_area_cachep, new);
64878 + return -ENOMEM;
64879 + }
64880 + }
64881 +#endif
64882 +
64883 /* most fields are the same, copy all, and then fixup */
64884 *new = *vma;
64885
64886 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
64887 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
64888 }
64889
64890 +#ifdef CONFIG_PAX_SEGMEXEC
64891 + if (vma_m) {
64892 + *new_m = *vma_m;
64893 + new_m->vm_mirror = new;
64894 + new->vm_mirror = new_m;
64895 +
64896 + if (new_below)
64897 + new_m->vm_end = addr_m;
64898 + else {
64899 + new_m->vm_start = addr_m;
64900 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
64901 + }
64902 + }
64903 +#endif
64904 +
64905 pol = mpol_dup(vma_policy(vma));
64906 if (IS_ERR(pol)) {
64907 +
64908 +#ifdef CONFIG_PAX_SEGMEXEC
64909 + if (new_m)
64910 + kmem_cache_free(vm_area_cachep, new_m);
64911 +#endif
64912 +
64913 kmem_cache_free(vm_area_cachep, new);
64914 return PTR_ERR(pol);
64915 }
64916 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
64917 else
64918 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
64919
64920 +#ifdef CONFIG_PAX_SEGMEXEC
64921 + if (vma_m) {
64922 + mpol_get(pol);
64923 + vma_set_policy(new_m, pol);
64924 +
64925 + if (new_m->vm_file) {
64926 + get_file(new_m->vm_file);
64927 + if (vma_m->vm_flags & VM_EXECUTABLE)
64928 + added_exe_file_vma(mm);
64929 + }
64930 +
64931 + if (new_m->vm_ops && new_m->vm_ops->open)
64932 + new_m->vm_ops->open(new_m);
64933 +
64934 + if (new_below)
64935 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
64936 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
64937 + else
64938 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
64939 + }
64940 +#endif
64941 +
64942 return 0;
64943 }
64944
64945 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
64946 * work. This now handles partial unmappings.
64947 * Jeremy Fitzhardinge <jeremy@goop.org>
64948 */
64949 +#ifdef CONFIG_PAX_SEGMEXEC
64950 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64951 +{
64952 + int ret = __do_munmap(mm, start, len);
64953 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
64954 + return ret;
64955 +
64956 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
64957 +}
64958 +
64959 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64960 +#else
64961 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64962 +#endif
64963 {
64964 unsigned long end;
64965 struct vm_area_struct *vma, *prev, *last;
64966
64967 + /*
64968 + * mm->mmap_sem is required to protect against another thread
64969 + * changing the mappings in case we sleep.
64970 + */
64971 + verify_mm_writelocked(mm);
64972 +
64973 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
64974 return -EINVAL;
64975
64976 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
64977 /* Fix up all other VM information */
64978 remove_vma_list(mm, vma);
64979
64980 + track_exec_limit(mm, start, end, 0UL);
64981 +
64982 return 0;
64983 }
64984
64985 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
64986
64987 profile_munmap(addr);
64988
64989 +#ifdef CONFIG_PAX_SEGMEXEC
64990 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
64991 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
64992 + return -EINVAL;
64993 +#endif
64994 +
64995 down_write(&mm->mmap_sem);
64996 ret = do_munmap(mm, addr, len);
64997 up_write(&mm->mmap_sem);
64998 return ret;
64999 }
65000
65001 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65002 -{
65003 -#ifdef CONFIG_DEBUG_VM
65004 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65005 - WARN_ON(1);
65006 - up_read(&mm->mmap_sem);
65007 - }
65008 -#endif
65009 -}
65010 -
65011 /*
65012 * this is really a simplified "do_mmap". it only handles
65013 * anonymous maps. eventually we may be able to do some
65014 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65015 struct rb_node ** rb_link, * rb_parent;
65016 pgoff_t pgoff = addr >> PAGE_SHIFT;
65017 int error;
65018 + unsigned long charged;
65019
65020 len = PAGE_ALIGN(len);
65021 if (!len)
65022 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65023
65024 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65025
65026 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65027 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65028 + flags &= ~VM_EXEC;
65029 +
65030 +#ifdef CONFIG_PAX_MPROTECT
65031 + if (mm->pax_flags & MF_PAX_MPROTECT)
65032 + flags &= ~VM_MAYEXEC;
65033 +#endif
65034 +
65035 + }
65036 +#endif
65037 +
65038 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65039 if (error & ~PAGE_MASK)
65040 return error;
65041
65042 + charged = len >> PAGE_SHIFT;
65043 +
65044 /*
65045 * mlock MCL_FUTURE?
65046 */
65047 if (mm->def_flags & VM_LOCKED) {
65048 unsigned long locked, lock_limit;
65049 - locked = len >> PAGE_SHIFT;
65050 + locked = charged;
65051 locked += mm->locked_vm;
65052 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65053 lock_limit >>= PAGE_SHIFT;
65054 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65055 /*
65056 * Clear old maps. this also does some error checking for us
65057 */
65058 - munmap_back:
65059 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65060 if (vma && vma->vm_start < addr + len) {
65061 if (do_munmap(mm, addr, len))
65062 return -ENOMEM;
65063 - goto munmap_back;
65064 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65065 + BUG_ON(vma && vma->vm_start < addr + len);
65066 }
65067
65068 /* Check against address space limits *after* clearing old maps... */
65069 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65070 + if (!may_expand_vm(mm, charged))
65071 return -ENOMEM;
65072
65073 if (mm->map_count > sysctl_max_map_count)
65074 return -ENOMEM;
65075
65076 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65077 + if (security_vm_enough_memory(charged))
65078 return -ENOMEM;
65079
65080 /* Can we just expand an old private anonymous mapping? */
65081 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65082 */
65083 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65084 if (!vma) {
65085 - vm_unacct_memory(len >> PAGE_SHIFT);
65086 + vm_unacct_memory(charged);
65087 return -ENOMEM;
65088 }
65089
65090 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65091 vma->vm_page_prot = vm_get_page_prot(flags);
65092 vma_link(mm, vma, prev, rb_link, rb_parent);
65093 out:
65094 - mm->total_vm += len >> PAGE_SHIFT;
65095 + mm->total_vm += charged;
65096 if (flags & VM_LOCKED) {
65097 if (!mlock_vma_pages_range(vma, addr, addr + len))
65098 - mm->locked_vm += (len >> PAGE_SHIFT);
65099 + mm->locked_vm += charged;
65100 }
65101 + track_exec_limit(mm, addr, addr + len, flags);
65102 return addr;
65103 }
65104
65105 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65106 * Walk the list again, actually closing and freeing it,
65107 * with preemption enabled, without holding any MM locks.
65108 */
65109 - while (vma)
65110 + while (vma) {
65111 + vma->vm_mirror = NULL;
65112 vma = remove_vma(vma);
65113 + }
65114
65115 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65116 }
65117 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65118 struct vm_area_struct * __vma, * prev;
65119 struct rb_node ** rb_link, * rb_parent;
65120
65121 +#ifdef CONFIG_PAX_SEGMEXEC
65122 + struct vm_area_struct *vma_m = NULL;
65123 +#endif
65124 +
65125 /*
65126 * The vm_pgoff of a purely anonymous vma should be irrelevant
65127 * until its first write fault, when page's anon_vma and index
65128 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65129 if ((vma->vm_flags & VM_ACCOUNT) &&
65130 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65131 return -ENOMEM;
65132 +
65133 +#ifdef CONFIG_PAX_SEGMEXEC
65134 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65135 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65136 + if (!vma_m)
65137 + return -ENOMEM;
65138 + }
65139 +#endif
65140 +
65141 vma_link(mm, vma, prev, rb_link, rb_parent);
65142 +
65143 +#ifdef CONFIG_PAX_SEGMEXEC
65144 + if (vma_m)
65145 + pax_mirror_vma(vma_m, vma);
65146 +#endif
65147 +
65148 return 0;
65149 }
65150
65151 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65152 struct rb_node **rb_link, *rb_parent;
65153 struct mempolicy *pol;
65154
65155 + BUG_ON(vma->vm_mirror);
65156 +
65157 /*
65158 * If anonymous vma has not yet been faulted, update new pgoff
65159 * to match new location, to increase its chance of merging.
65160 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65161 return new_vma;
65162 }
65163
65164 +#ifdef CONFIG_PAX_SEGMEXEC
65165 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65166 +{
65167 + struct vm_area_struct *prev_m;
65168 + struct rb_node **rb_link_m, *rb_parent_m;
65169 + struct mempolicy *pol_m;
65170 +
65171 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65172 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65173 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65174 + *vma_m = *vma;
65175 + pol_m = vma_policy(vma_m);
65176 + mpol_get(pol_m);
65177 + vma_set_policy(vma_m, pol_m);
65178 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65179 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65180 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65181 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65182 + if (vma_m->vm_file)
65183 + get_file(vma_m->vm_file);
65184 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65185 + vma_m->vm_ops->open(vma_m);
65186 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65187 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65188 + vma_m->vm_mirror = vma;
65189 + vma->vm_mirror = vma_m;
65190 +}
65191 +#endif
65192 +
65193 /*
65194 * Return true if the calling process may expand its vm space by the passed
65195 * number of pages
65196 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65197 unsigned long lim;
65198
65199 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65200 -
65201 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65202 if (cur + npages > lim)
65203 return 0;
65204 return 1;
65205 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65206 vma->vm_start = addr;
65207 vma->vm_end = addr + len;
65208
65209 +#ifdef CONFIG_PAX_MPROTECT
65210 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65211 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65212 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65213 + return -EPERM;
65214 + if (!(vm_flags & VM_EXEC))
65215 + vm_flags &= ~VM_MAYEXEC;
65216 +#else
65217 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65218 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65219 +#endif
65220 + else
65221 + vm_flags &= ~VM_MAYWRITE;
65222 + }
65223 +#endif
65224 +
65225 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65226 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65227
65228 diff -urNp linux-2.6.32.41/mm/mprotect.c linux-2.6.32.41/mm/mprotect.c
65229 --- linux-2.6.32.41/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65230 +++ linux-2.6.32.41/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65231 @@ -24,10 +24,16 @@
65232 #include <linux/mmu_notifier.h>
65233 #include <linux/migrate.h>
65234 #include <linux/perf_event.h>
65235 +
65236 +#ifdef CONFIG_PAX_MPROTECT
65237 +#include <linux/elf.h>
65238 +#endif
65239 +
65240 #include <asm/uaccess.h>
65241 #include <asm/pgtable.h>
65242 #include <asm/cacheflush.h>
65243 #include <asm/tlbflush.h>
65244 +#include <asm/mmu_context.h>
65245
65246 #ifndef pgprot_modify
65247 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65248 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65249 flush_tlb_range(vma, start, end);
65250 }
65251
65252 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65253 +/* called while holding the mmap semaphore for writing except stack expansion */
65254 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65255 +{
65256 + unsigned long oldlimit, newlimit = 0UL;
65257 +
65258 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65259 + return;
65260 +
65261 + spin_lock(&mm->page_table_lock);
65262 + oldlimit = mm->context.user_cs_limit;
65263 + if ((prot & VM_EXEC) && oldlimit < end)
65264 + /* USER_CS limit moved up */
65265 + newlimit = end;
65266 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65267 + /* USER_CS limit moved down */
65268 + newlimit = start;
65269 +
65270 + if (newlimit) {
65271 + mm->context.user_cs_limit = newlimit;
65272 +
65273 +#ifdef CONFIG_SMP
65274 + wmb();
65275 + cpus_clear(mm->context.cpu_user_cs_mask);
65276 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65277 +#endif
65278 +
65279 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65280 + }
65281 + spin_unlock(&mm->page_table_lock);
65282 + if (newlimit == end) {
65283 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65284 +
65285 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65286 + if (is_vm_hugetlb_page(vma))
65287 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65288 + else
65289 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65290 + }
65291 +}
65292 +#endif
65293 +
65294 int
65295 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65296 unsigned long start, unsigned long end, unsigned long newflags)
65297 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65298 int error;
65299 int dirty_accountable = 0;
65300
65301 +#ifdef CONFIG_PAX_SEGMEXEC
65302 + struct vm_area_struct *vma_m = NULL;
65303 + unsigned long start_m, end_m;
65304 +
65305 + start_m = start + SEGMEXEC_TASK_SIZE;
65306 + end_m = end + SEGMEXEC_TASK_SIZE;
65307 +#endif
65308 +
65309 if (newflags == oldflags) {
65310 *pprev = vma;
65311 return 0;
65312 }
65313
65314 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65315 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65316 +
65317 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65318 + return -ENOMEM;
65319 +
65320 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65321 + return -ENOMEM;
65322 + }
65323 +
65324 /*
65325 * If we make a private mapping writable we increase our commit;
65326 * but (without finer accounting) cannot reduce our commit if we
65327 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65328 }
65329 }
65330
65331 +#ifdef CONFIG_PAX_SEGMEXEC
65332 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65333 + if (start != vma->vm_start) {
65334 + error = split_vma(mm, vma, start, 1);
65335 + if (error)
65336 + goto fail;
65337 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65338 + *pprev = (*pprev)->vm_next;
65339 + }
65340 +
65341 + if (end != vma->vm_end) {
65342 + error = split_vma(mm, vma, end, 0);
65343 + if (error)
65344 + goto fail;
65345 + }
65346 +
65347 + if (pax_find_mirror_vma(vma)) {
65348 + error = __do_munmap(mm, start_m, end_m - start_m);
65349 + if (error)
65350 + goto fail;
65351 + } else {
65352 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65353 + if (!vma_m) {
65354 + error = -ENOMEM;
65355 + goto fail;
65356 + }
65357 + vma->vm_flags = newflags;
65358 + pax_mirror_vma(vma_m, vma);
65359 + }
65360 + }
65361 +#endif
65362 +
65363 /*
65364 * First try to merge with previous and/or next vma.
65365 */
65366 @@ -195,9 +293,21 @@ success:
65367 * vm_flags and vm_page_prot are protected by the mmap_sem
65368 * held in write mode.
65369 */
65370 +
65371 +#ifdef CONFIG_PAX_SEGMEXEC
65372 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65373 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65374 +#endif
65375 +
65376 vma->vm_flags = newflags;
65377 +
65378 +#ifdef CONFIG_PAX_MPROTECT
65379 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65380 + mm->binfmt->handle_mprotect(vma, newflags);
65381 +#endif
65382 +
65383 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65384 - vm_get_page_prot(newflags));
65385 + vm_get_page_prot(vma->vm_flags));
65386
65387 if (vma_wants_writenotify(vma)) {
65388 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65389 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65390 end = start + len;
65391 if (end <= start)
65392 return -ENOMEM;
65393 +
65394 +#ifdef CONFIG_PAX_SEGMEXEC
65395 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65396 + if (end > SEGMEXEC_TASK_SIZE)
65397 + return -EINVAL;
65398 + } else
65399 +#endif
65400 +
65401 + if (end > TASK_SIZE)
65402 + return -EINVAL;
65403 +
65404 if (!arch_validate_prot(prot))
65405 return -EINVAL;
65406
65407 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65408 /*
65409 * Does the application expect PROT_READ to imply PROT_EXEC:
65410 */
65411 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65412 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65413 prot |= PROT_EXEC;
65414
65415 vm_flags = calc_vm_prot_bits(prot);
65416 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65417 if (start > vma->vm_start)
65418 prev = vma;
65419
65420 +#ifdef CONFIG_PAX_MPROTECT
65421 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65422 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65423 +#endif
65424 +
65425 for (nstart = start ; ; ) {
65426 unsigned long newflags;
65427
65428 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65429
65430 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65431 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65432 + if (prot & (PROT_WRITE | PROT_EXEC))
65433 + gr_log_rwxmprotect(vma->vm_file);
65434 +
65435 + error = -EACCES;
65436 + goto out;
65437 + }
65438 +
65439 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65440 error = -EACCES;
65441 goto out;
65442 }
65443 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65444 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65445 if (error)
65446 goto out;
65447 +
65448 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65449 +
65450 nstart = tmp;
65451
65452 if (nstart < prev->vm_end)
65453 diff -urNp linux-2.6.32.41/mm/mremap.c linux-2.6.32.41/mm/mremap.c
65454 --- linux-2.6.32.41/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65455 +++ linux-2.6.32.41/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65456 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65457 continue;
65458 pte = ptep_clear_flush(vma, old_addr, old_pte);
65459 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65460 +
65461 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65462 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65463 + pte = pte_exprotect(pte);
65464 +#endif
65465 +
65466 set_pte_at(mm, new_addr, new_pte, pte);
65467 }
65468
65469 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65470 if (is_vm_hugetlb_page(vma))
65471 goto Einval;
65472
65473 +#ifdef CONFIG_PAX_SEGMEXEC
65474 + if (pax_find_mirror_vma(vma))
65475 + goto Einval;
65476 +#endif
65477 +
65478 /* We can't remap across vm area boundaries */
65479 if (old_len > vma->vm_end - addr)
65480 goto Efault;
65481 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65482 unsigned long ret = -EINVAL;
65483 unsigned long charged = 0;
65484 unsigned long map_flags;
65485 + unsigned long pax_task_size = TASK_SIZE;
65486
65487 if (new_addr & ~PAGE_MASK)
65488 goto out;
65489
65490 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65491 +#ifdef CONFIG_PAX_SEGMEXEC
65492 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65493 + pax_task_size = SEGMEXEC_TASK_SIZE;
65494 +#endif
65495 +
65496 + pax_task_size -= PAGE_SIZE;
65497 +
65498 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65499 goto out;
65500
65501 /* Check if the location we're moving into overlaps the
65502 * old location at all, and fail if it does.
65503 */
65504 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65505 - goto out;
65506 -
65507 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65508 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65509 goto out;
65510
65511 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65512 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65513 struct vm_area_struct *vma;
65514 unsigned long ret = -EINVAL;
65515 unsigned long charged = 0;
65516 + unsigned long pax_task_size = TASK_SIZE;
65517
65518 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65519 goto out;
65520 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65521 if (!new_len)
65522 goto out;
65523
65524 +#ifdef CONFIG_PAX_SEGMEXEC
65525 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65526 + pax_task_size = SEGMEXEC_TASK_SIZE;
65527 +#endif
65528 +
65529 + pax_task_size -= PAGE_SIZE;
65530 +
65531 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65532 + old_len > pax_task_size || addr > pax_task_size-old_len)
65533 + goto out;
65534 +
65535 if (flags & MREMAP_FIXED) {
65536 if (flags & MREMAP_MAYMOVE)
65537 ret = mremap_to(addr, old_len, new_addr, new_len);
65538 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65539 addr + new_len);
65540 }
65541 ret = addr;
65542 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65543 goto out;
65544 }
65545 }
65546 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65547 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65548 if (ret)
65549 goto out;
65550 +
65551 + map_flags = vma->vm_flags;
65552 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65553 + if (!(ret & ~PAGE_MASK)) {
65554 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65555 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65556 + }
65557 }
65558 out:
65559 if (ret & ~PAGE_MASK)
65560 diff -urNp linux-2.6.32.41/mm/nommu.c linux-2.6.32.41/mm/nommu.c
65561 --- linux-2.6.32.41/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65562 +++ linux-2.6.32.41/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65563 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65564 int sysctl_overcommit_ratio = 50; /* default is 50% */
65565 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65566 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65567 -int heap_stack_gap = 0;
65568
65569 atomic_long_t mmap_pages_allocated;
65570
65571 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65572 EXPORT_SYMBOL(find_vma);
65573
65574 /*
65575 - * find a VMA
65576 - * - we don't extend stack VMAs under NOMMU conditions
65577 - */
65578 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65579 -{
65580 - return find_vma(mm, addr);
65581 -}
65582 -
65583 -/*
65584 * expand a stack to a given address
65585 * - not supported under NOMMU conditions
65586 */
65587 diff -urNp linux-2.6.32.41/mm/page_alloc.c linux-2.6.32.41/mm/page_alloc.c
65588 --- linux-2.6.32.41/mm/page_alloc.c 2011-03-27 14:31:47.000000000 -0400
65589 +++ linux-2.6.32.41/mm/page_alloc.c 2011-05-16 21:46:57.000000000 -0400
65590 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65591 int bad = 0;
65592 int wasMlocked = __TestClearPageMlocked(page);
65593
65594 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65595 + unsigned long index = 1UL << order;
65596 +#endif
65597 +
65598 kmemcheck_free_shadow(page, order);
65599
65600 for (i = 0 ; i < (1 << order) ; ++i)
65601 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65602 debug_check_no_obj_freed(page_address(page),
65603 PAGE_SIZE << order);
65604 }
65605 +
65606 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65607 + for (; index; --index)
65608 + sanitize_highpage(page + index - 1);
65609 +#endif
65610 +
65611 arch_free_page(page, order);
65612 kernel_map_pages(page, 1 << order, 0);
65613
65614 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65615 arch_alloc_page(page, order);
65616 kernel_map_pages(page, 1 << order, 1);
65617
65618 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
65619 if (gfp_flags & __GFP_ZERO)
65620 prep_zero_page(page, order, gfp_flags);
65621 +#endif
65622
65623 if (order && (gfp_flags & __GFP_COMP))
65624 prep_compound_page(page, order);
65625 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65626 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65627 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65628 }
65629 +
65630 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65631 + sanitize_highpage(page);
65632 +#endif
65633 +
65634 arch_free_page(page, 0);
65635 kernel_map_pages(page, 1, 0);
65636
65637 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
65638 int cpu;
65639 struct zone *zone;
65640
65641 + pax_track_stack();
65642 +
65643 for_each_populated_zone(zone) {
65644 show_node(zone);
65645 printk("%s per-cpu:\n", zone->name);
65646 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65647 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65648 }
65649 #else
65650 -static void inline setup_usemap(struct pglist_data *pgdat,
65651 +static inline void setup_usemap(struct pglist_data *pgdat,
65652 struct zone *zone, unsigned long zonesize) {}
65653 #endif /* CONFIG_SPARSEMEM */
65654
65655 diff -urNp linux-2.6.32.41/mm/percpu.c linux-2.6.32.41/mm/percpu.c
65656 --- linux-2.6.32.41/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65657 +++ linux-2.6.32.41/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65658 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65659 static unsigned int pcpu_last_unit_cpu __read_mostly;
65660
65661 /* the address of the first chunk which starts with the kernel static area */
65662 -void *pcpu_base_addr __read_mostly;
65663 +void *pcpu_base_addr __read_only;
65664 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65665
65666 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65667 diff -urNp linux-2.6.32.41/mm/rmap.c linux-2.6.32.41/mm/rmap.c
65668 --- linux-2.6.32.41/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65669 +++ linux-2.6.32.41/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65670 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65671 /* page_table_lock to protect against threads */
65672 spin_lock(&mm->page_table_lock);
65673 if (likely(!vma->anon_vma)) {
65674 +
65675 +#ifdef CONFIG_PAX_SEGMEXEC
65676 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65677 +
65678 + if (vma_m) {
65679 + BUG_ON(vma_m->anon_vma);
65680 + vma_m->anon_vma = anon_vma;
65681 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65682 + }
65683 +#endif
65684 +
65685 vma->anon_vma = anon_vma;
65686 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65687 allocated = NULL;
65688 diff -urNp linux-2.6.32.41/mm/shmem.c linux-2.6.32.41/mm/shmem.c
65689 --- linux-2.6.32.41/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65690 +++ linux-2.6.32.41/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65691 @@ -31,7 +31,7 @@
65692 #include <linux/swap.h>
65693 #include <linux/ima.h>
65694
65695 -static struct vfsmount *shm_mnt;
65696 +struct vfsmount *shm_mnt;
65697
65698 #ifdef CONFIG_SHMEM
65699 /*
65700 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65701 goto unlock;
65702 }
65703 entry = shmem_swp_entry(info, index, NULL);
65704 + if (!entry)
65705 + goto unlock;
65706 if (entry->val) {
65707 /*
65708 * The more uptodate page coming down from a stacked
65709 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65710 struct vm_area_struct pvma;
65711 struct page *page;
65712
65713 + pax_track_stack();
65714 +
65715 spol = mpol_cond_copy(&mpol,
65716 mpol_shared_policy_lookup(&info->policy, idx));
65717
65718 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65719
65720 info = SHMEM_I(inode);
65721 inode->i_size = len-1;
65722 - if (len <= (char *)inode - (char *)info) {
65723 + if (len <= (char *)inode - (char *)info && len <= 64) {
65724 /* do it inline */
65725 memcpy(info, symname, len);
65726 inode->i_op = &shmem_symlink_inline_operations;
65727 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65728 int err = -ENOMEM;
65729
65730 /* Round up to L1_CACHE_BYTES to resist false sharing */
65731 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65732 - L1_CACHE_BYTES), GFP_KERNEL);
65733 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65734 if (!sbinfo)
65735 return -ENOMEM;
65736
65737 diff -urNp linux-2.6.32.41/mm/slab.c linux-2.6.32.41/mm/slab.c
65738 --- linux-2.6.32.41/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
65739 +++ linux-2.6.32.41/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
65740 @@ -174,7 +174,7 @@
65741
65742 /* Legal flag mask for kmem_cache_create(). */
65743 #if DEBUG
65744 -# define CREATE_MASK (SLAB_RED_ZONE | \
65745 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
65746 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
65747 SLAB_CACHE_DMA | \
65748 SLAB_STORE_USER | \
65749 @@ -182,7 +182,7 @@
65750 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65751 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
65752 #else
65753 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
65754 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
65755 SLAB_CACHE_DMA | \
65756 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
65757 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65758 @@ -308,7 +308,7 @@ struct kmem_list3 {
65759 * Need this for bootstrapping a per node allocator.
65760 */
65761 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
65762 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
65763 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
65764 #define CACHE_CACHE 0
65765 #define SIZE_AC MAX_NUMNODES
65766 #define SIZE_L3 (2 * MAX_NUMNODES)
65767 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
65768 if ((x)->max_freeable < i) \
65769 (x)->max_freeable = i; \
65770 } while (0)
65771 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
65772 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
65773 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
65774 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
65775 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
65776 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
65777 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
65778 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
65779 #else
65780 #define STATS_INC_ACTIVE(x) do { } while (0)
65781 #define STATS_DEC_ACTIVE(x) do { } while (0)
65782 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
65783 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
65784 */
65785 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
65786 - const struct slab *slab, void *obj)
65787 + const struct slab *slab, const void *obj)
65788 {
65789 u32 offset = (obj - slab->s_mem);
65790 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
65791 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
65792 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
65793 sizes[INDEX_AC].cs_size,
65794 ARCH_KMALLOC_MINALIGN,
65795 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65796 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65797 NULL);
65798
65799 if (INDEX_AC != INDEX_L3) {
65800 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
65801 kmem_cache_create(names[INDEX_L3].name,
65802 sizes[INDEX_L3].cs_size,
65803 ARCH_KMALLOC_MINALIGN,
65804 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65805 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65806 NULL);
65807 }
65808
65809 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
65810 sizes->cs_cachep = kmem_cache_create(names->name,
65811 sizes->cs_size,
65812 ARCH_KMALLOC_MINALIGN,
65813 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65814 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65815 NULL);
65816 }
65817 #ifdef CONFIG_ZONE_DMA
65818 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
65819 }
65820 /* cpu stats */
65821 {
65822 - unsigned long allochit = atomic_read(&cachep->allochit);
65823 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
65824 - unsigned long freehit = atomic_read(&cachep->freehit);
65825 - unsigned long freemiss = atomic_read(&cachep->freemiss);
65826 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
65827 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
65828 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
65829 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
65830
65831 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
65832 allochit, allocmiss, freehit, freemiss);
65833 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
65834
65835 static int __init slab_proc_init(void)
65836 {
65837 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
65838 + mode_t gr_mode = S_IRUGO;
65839 +
65840 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65841 + gr_mode = S_IRUSR;
65842 +#endif
65843 +
65844 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
65845 #ifdef CONFIG_DEBUG_SLAB_LEAK
65846 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
65847 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
65848 #endif
65849 return 0;
65850 }
65851 module_init(slab_proc_init);
65852 #endif
65853
65854 +void check_object_size(const void *ptr, unsigned long n, bool to)
65855 +{
65856 +
65857 +#ifdef CONFIG_PAX_USERCOPY
65858 + struct page *page;
65859 + struct kmem_cache *cachep = NULL;
65860 + struct slab *slabp;
65861 + unsigned int objnr;
65862 + unsigned long offset;
65863 +
65864 + if (!n)
65865 + return;
65866 +
65867 + if (ZERO_OR_NULL_PTR(ptr))
65868 + goto report;
65869 +
65870 + if (!virt_addr_valid(ptr))
65871 + return;
65872 +
65873 + page = virt_to_head_page(ptr);
65874 +
65875 + if (!PageSlab(page)) {
65876 + if (object_is_on_stack(ptr, n) == -1)
65877 + goto report;
65878 + return;
65879 + }
65880 +
65881 + cachep = page_get_cache(page);
65882 + if (!(cachep->flags & SLAB_USERCOPY))
65883 + goto report;
65884 +
65885 + slabp = page_get_slab(page);
65886 + objnr = obj_to_index(cachep, slabp, ptr);
65887 + BUG_ON(objnr >= cachep->num);
65888 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
65889 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
65890 + return;
65891 +
65892 +report:
65893 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
65894 +#endif
65895 +
65896 +}
65897 +EXPORT_SYMBOL(check_object_size);
65898 +
65899 /**
65900 * ksize - get the actual amount of memory allocated for a given object
65901 * @objp: Pointer to the object
65902 diff -urNp linux-2.6.32.41/mm/slob.c linux-2.6.32.41/mm/slob.c
65903 --- linux-2.6.32.41/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
65904 +++ linux-2.6.32.41/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
65905 @@ -29,7 +29,7 @@
65906 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
65907 * alloc_pages() directly, allocating compound pages so the page order
65908 * does not have to be separately tracked, and also stores the exact
65909 - * allocation size in page->private so that it can be used to accurately
65910 + * allocation size in slob_page->size so that it can be used to accurately
65911 * provide ksize(). These objects are detected in kfree() because slob_page()
65912 * is false for them.
65913 *
65914 @@ -58,6 +58,7 @@
65915 */
65916
65917 #include <linux/kernel.h>
65918 +#include <linux/sched.h>
65919 #include <linux/slab.h>
65920 #include <linux/mm.h>
65921 #include <linux/swap.h> /* struct reclaim_state */
65922 @@ -100,7 +101,8 @@ struct slob_page {
65923 unsigned long flags; /* mandatory */
65924 atomic_t _count; /* mandatory */
65925 slobidx_t units; /* free units left in page */
65926 - unsigned long pad[2];
65927 + unsigned long pad[1];
65928 + unsigned long size; /* size when >=PAGE_SIZE */
65929 slob_t *free; /* first free slob_t in page */
65930 struct list_head list; /* linked list of free pages */
65931 };
65932 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
65933 */
65934 static inline int is_slob_page(struct slob_page *sp)
65935 {
65936 - return PageSlab((struct page *)sp);
65937 + return PageSlab((struct page *)sp) && !sp->size;
65938 }
65939
65940 static inline void set_slob_page(struct slob_page *sp)
65941 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
65942
65943 static inline struct slob_page *slob_page(const void *addr)
65944 {
65945 - return (struct slob_page *)virt_to_page(addr);
65946 + return (struct slob_page *)virt_to_head_page(addr);
65947 }
65948
65949 /*
65950 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
65951 /*
65952 * Return the size of a slob block.
65953 */
65954 -static slobidx_t slob_units(slob_t *s)
65955 +static slobidx_t slob_units(const slob_t *s)
65956 {
65957 if (s->units > 0)
65958 return s->units;
65959 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
65960 /*
65961 * Return the next free slob block pointer after this one.
65962 */
65963 -static slob_t *slob_next(slob_t *s)
65964 +static slob_t *slob_next(const slob_t *s)
65965 {
65966 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
65967 slobidx_t next;
65968 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
65969 /*
65970 * Returns true if s is the last free block in its page.
65971 */
65972 -static int slob_last(slob_t *s)
65973 +static int slob_last(const slob_t *s)
65974 {
65975 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
65976 }
65977 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
65978 if (!page)
65979 return NULL;
65980
65981 + set_slob_page(page);
65982 return page_address(page);
65983 }
65984
65985 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
65986 if (!b)
65987 return NULL;
65988 sp = slob_page(b);
65989 - set_slob_page(sp);
65990
65991 spin_lock_irqsave(&slob_lock, flags);
65992 sp->units = SLOB_UNITS(PAGE_SIZE);
65993 sp->free = b;
65994 + sp->size = 0;
65995 INIT_LIST_HEAD(&sp->list);
65996 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
65997 set_slob_page_free(sp, slob_list);
65998 @@ -475,10 +478,9 @@ out:
65999 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66000 #endif
66001
66002 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66003 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66004 {
66005 - unsigned int *m;
66006 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66007 + slob_t *m;
66008 void *ret;
66009
66010 lockdep_trace_alloc(gfp);
66011 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66012
66013 if (!m)
66014 return NULL;
66015 - *m = size;
66016 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66017 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66018 + m[0].units = size;
66019 + m[1].units = align;
66020 ret = (void *)m + align;
66021
66022 trace_kmalloc_node(_RET_IP_, ret,
66023 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66024
66025 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66026 if (ret) {
66027 - struct page *page;
66028 - page = virt_to_page(ret);
66029 - page->private = size;
66030 + struct slob_page *sp;
66031 + sp = slob_page(ret);
66032 + sp->size = size;
66033 }
66034
66035 trace_kmalloc_node(_RET_IP_, ret,
66036 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66037 kmemleak_alloc(ret, size, 1, gfp);
66038 return ret;
66039 }
66040 +
66041 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66042 +{
66043 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66044 +
66045 + return __kmalloc_node_align(size, gfp, node, align);
66046 +}
66047 EXPORT_SYMBOL(__kmalloc_node);
66048
66049 void kfree(const void *block)
66050 @@ -528,13 +540,81 @@ void kfree(const void *block)
66051 sp = slob_page(block);
66052 if (is_slob_page(sp)) {
66053 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66054 - unsigned int *m = (unsigned int *)(block - align);
66055 - slob_free(m, *m + align);
66056 - } else
66057 + slob_t *m = (slob_t *)(block - align);
66058 + slob_free(m, m[0].units + align);
66059 + } else {
66060 + clear_slob_page(sp);
66061 + free_slob_page(sp);
66062 + sp->size = 0;
66063 put_page(&sp->page);
66064 + }
66065 }
66066 EXPORT_SYMBOL(kfree);
66067
66068 +void check_object_size(const void *ptr, unsigned long n, bool to)
66069 +{
66070 +
66071 +#ifdef CONFIG_PAX_USERCOPY
66072 + struct slob_page *sp;
66073 + const slob_t *free;
66074 + const void *base;
66075 +
66076 + if (!n)
66077 + return;
66078 +
66079 + if (ZERO_OR_NULL_PTR(ptr))
66080 + goto report;
66081 +
66082 + if (!virt_addr_valid(ptr))
66083 + return;
66084 +
66085 + sp = slob_page(ptr);
66086 + if (!PageSlab((struct page*)sp)) {
66087 + if (object_is_on_stack(ptr, n) == -1)
66088 + goto report;
66089 + return;
66090 + }
66091 +
66092 + if (sp->size) {
66093 + base = page_address(&sp->page);
66094 + if (base <= ptr && n <= sp->size - (ptr - base))
66095 + return;
66096 + goto report;
66097 + }
66098 +
66099 + /* some tricky double walking to find the chunk */
66100 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66101 + free = sp->free;
66102 +
66103 + while (!slob_last(free) && (void *)free <= ptr) {
66104 + base = free + slob_units(free);
66105 + free = slob_next(free);
66106 + }
66107 +
66108 + while (base < (void *)free) {
66109 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66110 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66111 + int offset;
66112 +
66113 + if (ptr < base + align)
66114 + goto report;
66115 +
66116 + offset = ptr - base - align;
66117 + if (offset < m) {
66118 + if (n <= m - offset)
66119 + return;
66120 + goto report;
66121 + }
66122 + base += size;
66123 + }
66124 +
66125 +report:
66126 + pax_report_usercopy(ptr, n, to, NULL);
66127 +#endif
66128 +
66129 +}
66130 +EXPORT_SYMBOL(check_object_size);
66131 +
66132 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66133 size_t ksize(const void *block)
66134 {
66135 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66136 sp = slob_page(block);
66137 if (is_slob_page(sp)) {
66138 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66139 - unsigned int *m = (unsigned int *)(block - align);
66140 - return SLOB_UNITS(*m) * SLOB_UNIT;
66141 + slob_t *m = (slob_t *)(block - align);
66142 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66143 } else
66144 - return sp->page.private;
66145 + return sp->size;
66146 }
66147 EXPORT_SYMBOL(ksize);
66148
66149 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66150 {
66151 void *b;
66152
66153 +#ifdef CONFIG_PAX_USERCOPY
66154 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66155 +#else
66156 if (c->size < PAGE_SIZE) {
66157 b = slob_alloc(c->size, flags, c->align, node);
66158 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66159 SLOB_UNITS(c->size) * SLOB_UNIT,
66160 flags, node);
66161 } else {
66162 + struct slob_page *sp;
66163 +
66164 b = slob_new_pages(flags, get_order(c->size), node);
66165 + sp = slob_page(b);
66166 + sp->size = c->size;
66167 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66168 PAGE_SIZE << get_order(c->size),
66169 flags, node);
66170 }
66171 +#endif
66172
66173 if (c->ctor)
66174 c->ctor(b);
66175 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66176
66177 static void __kmem_cache_free(void *b, int size)
66178 {
66179 - if (size < PAGE_SIZE)
66180 + struct slob_page *sp = slob_page(b);
66181 +
66182 + if (is_slob_page(sp))
66183 slob_free(b, size);
66184 - else
66185 + else {
66186 + clear_slob_page(sp);
66187 + free_slob_page(sp);
66188 + sp->size = 0;
66189 slob_free_pages(b, get_order(size));
66190 + }
66191 }
66192
66193 static void kmem_rcu_free(struct rcu_head *head)
66194 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66195
66196 void kmem_cache_free(struct kmem_cache *c, void *b)
66197 {
66198 + int size = c->size;
66199 +
66200 +#ifdef CONFIG_PAX_USERCOPY
66201 + if (size + c->align < PAGE_SIZE) {
66202 + size += c->align;
66203 + b -= c->align;
66204 + }
66205 +#endif
66206 +
66207 kmemleak_free_recursive(b, c->flags);
66208 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66209 struct slob_rcu *slob_rcu;
66210 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66211 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66212 INIT_RCU_HEAD(&slob_rcu->head);
66213 - slob_rcu->size = c->size;
66214 + slob_rcu->size = size;
66215 call_rcu(&slob_rcu->head, kmem_rcu_free);
66216 } else {
66217 - __kmem_cache_free(b, c->size);
66218 + __kmem_cache_free(b, size);
66219 }
66220
66221 trace_kmem_cache_free(_RET_IP_, b);
66222 diff -urNp linux-2.6.32.41/mm/slub.c linux-2.6.32.41/mm/slub.c
66223 --- linux-2.6.32.41/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66224 +++ linux-2.6.32.41/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66225 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66226 if (!t->addr)
66227 return;
66228
66229 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66230 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66231 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66232 }
66233
66234 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66235
66236 page = virt_to_head_page(x);
66237
66238 + BUG_ON(!PageSlab(page));
66239 +
66240 slab_free(s, page, x, _RET_IP_);
66241
66242 trace_kmem_cache_free(_RET_IP_, x);
66243 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66244 * Merge control. If this is set then no merging of slab caches will occur.
66245 * (Could be removed. This was introduced to pacify the merge skeptics.)
66246 */
66247 -static int slub_nomerge;
66248 +static int slub_nomerge = 1;
66249
66250 /*
66251 * Calculate the order of allocation given an slab object size.
66252 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66253 * list to avoid pounding the page allocator excessively.
66254 */
66255 set_min_partial(s, ilog2(s->size));
66256 - s->refcount = 1;
66257 + atomic_set(&s->refcount, 1);
66258 #ifdef CONFIG_NUMA
66259 s->remote_node_defrag_ratio = 1000;
66260 #endif
66261 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66262 void kmem_cache_destroy(struct kmem_cache *s)
66263 {
66264 down_write(&slub_lock);
66265 - s->refcount--;
66266 - if (!s->refcount) {
66267 + if (atomic_dec_and_test(&s->refcount)) {
66268 list_del(&s->list);
66269 up_write(&slub_lock);
66270 if (kmem_cache_close(s)) {
66271 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66272 __setup("slub_nomerge", setup_slub_nomerge);
66273
66274 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66275 - const char *name, int size, gfp_t gfp_flags)
66276 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66277 {
66278 - unsigned int flags = 0;
66279 -
66280 if (gfp_flags & SLUB_DMA)
66281 - flags = SLAB_CACHE_DMA;
66282 + flags |= SLAB_CACHE_DMA;
66283
66284 /*
66285 * This function is called with IRQs disabled during early-boot on
66286 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66287 EXPORT_SYMBOL(__kmalloc_node);
66288 #endif
66289
66290 +void check_object_size(const void *ptr, unsigned long n, bool to)
66291 +{
66292 +
66293 +#ifdef CONFIG_PAX_USERCOPY
66294 + struct page *page;
66295 + struct kmem_cache *s = NULL;
66296 + unsigned long offset;
66297 +
66298 + if (!n)
66299 + return;
66300 +
66301 + if (ZERO_OR_NULL_PTR(ptr))
66302 + goto report;
66303 +
66304 + if (!virt_addr_valid(ptr))
66305 + return;
66306 +
66307 + page = get_object_page(ptr);
66308 +
66309 + if (!page) {
66310 + if (object_is_on_stack(ptr, n) == -1)
66311 + goto report;
66312 + return;
66313 + }
66314 +
66315 + s = page->slab;
66316 + if (!(s->flags & SLAB_USERCOPY))
66317 + goto report;
66318 +
66319 + offset = (ptr - page_address(page)) % s->size;
66320 + if (offset <= s->objsize && n <= s->objsize - offset)
66321 + return;
66322 +
66323 +report:
66324 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66325 +#endif
66326 +
66327 +}
66328 +EXPORT_SYMBOL(check_object_size);
66329 +
66330 size_t ksize(const void *object)
66331 {
66332 struct page *page;
66333 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66334 * kmem_cache_open for slab_state == DOWN.
66335 */
66336 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66337 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66338 - kmalloc_caches[0].refcount = -1;
66339 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66340 + atomic_set(&kmalloc_caches[0].refcount, -1);
66341 caches++;
66342
66343 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66344 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66345 /* Caches that are not of the two-to-the-power-of size */
66346 if (KMALLOC_MIN_SIZE <= 32) {
66347 create_kmalloc_cache(&kmalloc_caches[1],
66348 - "kmalloc-96", 96, GFP_NOWAIT);
66349 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66350 caches++;
66351 }
66352 if (KMALLOC_MIN_SIZE <= 64) {
66353 create_kmalloc_cache(&kmalloc_caches[2],
66354 - "kmalloc-192", 192, GFP_NOWAIT);
66355 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66356 caches++;
66357 }
66358
66359 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66360 create_kmalloc_cache(&kmalloc_caches[i],
66361 - "kmalloc", 1 << i, GFP_NOWAIT);
66362 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66363 caches++;
66364 }
66365
66366 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66367 /*
66368 * We may have set a slab to be unmergeable during bootstrap.
66369 */
66370 - if (s->refcount < 0)
66371 + if (atomic_read(&s->refcount) < 0)
66372 return 1;
66373
66374 return 0;
66375 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66376 if (s) {
66377 int cpu;
66378
66379 - s->refcount++;
66380 + atomic_inc(&s->refcount);
66381 /*
66382 * Adjust the object sizes so that we clear
66383 * the complete object on kzalloc.
66384 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66385
66386 if (sysfs_slab_alias(s, name)) {
66387 down_write(&slub_lock);
66388 - s->refcount--;
66389 + atomic_dec(&s->refcount);
66390 up_write(&slub_lock);
66391 goto err;
66392 }
66393 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66394
66395 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66396 {
66397 - return sprintf(buf, "%d\n", s->refcount - 1);
66398 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66399 }
66400 SLAB_ATTR_RO(aliases);
66401
66402 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66403 kfree(s);
66404 }
66405
66406 -static struct sysfs_ops slab_sysfs_ops = {
66407 +static const struct sysfs_ops slab_sysfs_ops = {
66408 .show = slab_attr_show,
66409 .store = slab_attr_store,
66410 };
66411 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66412 return 0;
66413 }
66414
66415 -static struct kset_uevent_ops slab_uevent_ops = {
66416 +static const struct kset_uevent_ops slab_uevent_ops = {
66417 .filter = uevent_filter,
66418 };
66419
66420 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66421
66422 static int __init slab_proc_init(void)
66423 {
66424 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66425 + mode_t gr_mode = S_IRUGO;
66426 +
66427 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66428 + gr_mode = S_IRUSR;
66429 +#endif
66430 +
66431 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66432 return 0;
66433 }
66434 module_init(slab_proc_init);
66435 diff -urNp linux-2.6.32.41/mm/util.c linux-2.6.32.41/mm/util.c
66436 --- linux-2.6.32.41/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66437 +++ linux-2.6.32.41/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66438 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66439 void arch_pick_mmap_layout(struct mm_struct *mm)
66440 {
66441 mm->mmap_base = TASK_UNMAPPED_BASE;
66442 +
66443 +#ifdef CONFIG_PAX_RANDMMAP
66444 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66445 + mm->mmap_base += mm->delta_mmap;
66446 +#endif
66447 +
66448 mm->get_unmapped_area = arch_get_unmapped_area;
66449 mm->unmap_area = arch_unmap_area;
66450 }
66451 diff -urNp linux-2.6.32.41/mm/vmalloc.c linux-2.6.32.41/mm/vmalloc.c
66452 --- linux-2.6.32.41/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66453 +++ linux-2.6.32.41/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66454 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66455
66456 pte = pte_offset_kernel(pmd, addr);
66457 do {
66458 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66459 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66460 +
66461 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66462 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66463 + BUG_ON(!pte_exec(*pte));
66464 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66465 + continue;
66466 + }
66467 +#endif
66468 +
66469 + {
66470 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66471 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66472 + }
66473 } while (pte++, addr += PAGE_SIZE, addr != end);
66474 }
66475
66476 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66477 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66478 {
66479 pte_t *pte;
66480 + int ret = -ENOMEM;
66481
66482 /*
66483 * nr is a running index into the array which helps higher level
66484 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66485 pte = pte_alloc_kernel(pmd, addr);
66486 if (!pte)
66487 return -ENOMEM;
66488 +
66489 + pax_open_kernel();
66490 do {
66491 struct page *page = pages[*nr];
66492
66493 - if (WARN_ON(!pte_none(*pte)))
66494 - return -EBUSY;
66495 - if (WARN_ON(!page))
66496 - return -ENOMEM;
66497 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66498 + if (!(pgprot_val(prot) & _PAGE_NX))
66499 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66500 + else
66501 +#endif
66502 +
66503 + if (WARN_ON(!pte_none(*pte))) {
66504 + ret = -EBUSY;
66505 + goto out;
66506 + }
66507 + if (WARN_ON(!page)) {
66508 + ret = -ENOMEM;
66509 + goto out;
66510 + }
66511 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66512 (*nr)++;
66513 } while (pte++, addr += PAGE_SIZE, addr != end);
66514 - return 0;
66515 + ret = 0;
66516 +out:
66517 + pax_close_kernel();
66518 + return ret;
66519 }
66520
66521 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66522 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66523 * and fall back on vmalloc() if that fails. Others
66524 * just put it in the vmalloc space.
66525 */
66526 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66527 +#ifdef CONFIG_MODULES
66528 +#ifdef MODULES_VADDR
66529 unsigned long addr = (unsigned long)x;
66530 if (addr >= MODULES_VADDR && addr < MODULES_END)
66531 return 1;
66532 #endif
66533 +
66534 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66535 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66536 + return 1;
66537 +#endif
66538 +
66539 +#endif
66540 +
66541 return is_vmalloc_addr(x);
66542 }
66543
66544 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66545
66546 if (!pgd_none(*pgd)) {
66547 pud_t *pud = pud_offset(pgd, addr);
66548 +#ifdef CONFIG_X86
66549 + if (!pud_large(*pud))
66550 +#endif
66551 if (!pud_none(*pud)) {
66552 pmd_t *pmd = pmd_offset(pud, addr);
66553 +#ifdef CONFIG_X86
66554 + if (!pmd_large(*pmd))
66555 +#endif
66556 if (!pmd_none(*pmd)) {
66557 pte_t *ptep, pte;
66558
66559 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66560 struct rb_node *tmp;
66561
66562 while (*p) {
66563 - struct vmap_area *tmp;
66564 + struct vmap_area *varea;
66565
66566 parent = *p;
66567 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66568 - if (va->va_start < tmp->va_end)
66569 + varea = rb_entry(parent, struct vmap_area, rb_node);
66570 + if (va->va_start < varea->va_end)
66571 p = &(*p)->rb_left;
66572 - else if (va->va_end > tmp->va_start)
66573 + else if (va->va_end > varea->va_start)
66574 p = &(*p)->rb_right;
66575 else
66576 BUG();
66577 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66578 struct vm_struct *area;
66579
66580 BUG_ON(in_interrupt());
66581 +
66582 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66583 + if (flags & VM_KERNEXEC) {
66584 + if (start != VMALLOC_START || end != VMALLOC_END)
66585 + return NULL;
66586 + start = (unsigned long)MODULES_EXEC_VADDR;
66587 + end = (unsigned long)MODULES_EXEC_END;
66588 + }
66589 +#endif
66590 +
66591 if (flags & VM_IOREMAP) {
66592 int bit = fls(size);
66593
66594 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66595 if (count > totalram_pages)
66596 return NULL;
66597
66598 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66599 + if (!(pgprot_val(prot) & _PAGE_NX))
66600 + flags |= VM_KERNEXEC;
66601 +#endif
66602 +
66603 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66604 __builtin_return_address(0));
66605 if (!area)
66606 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66607 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66608 return NULL;
66609
66610 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66611 + if (!(pgprot_val(prot) & _PAGE_NX))
66612 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66613 + node, gfp_mask, caller);
66614 + else
66615 +#endif
66616 +
66617 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66618 VMALLOC_END, node, gfp_mask, caller);
66619
66620 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66621 return addr;
66622 }
66623
66624 +#undef __vmalloc
66625 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66626 {
66627 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66628 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66629 * For tight control over page level allocator and protection flags
66630 * use __vmalloc() instead.
66631 */
66632 +#undef vmalloc
66633 void *vmalloc(unsigned long size)
66634 {
66635 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66636 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66637 * The resulting memory area is zeroed so it can be mapped to userspace
66638 * without leaking data.
66639 */
66640 +#undef vmalloc_user
66641 void *vmalloc_user(unsigned long size)
66642 {
66643 struct vm_struct *area;
66644 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66645 * For tight control over page level allocator and protection flags
66646 * use __vmalloc() instead.
66647 */
66648 +#undef vmalloc_node
66649 void *vmalloc_node(unsigned long size, int node)
66650 {
66651 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66652 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66653 * For tight control over page level allocator and protection flags
66654 * use __vmalloc() instead.
66655 */
66656 -
66657 +#undef vmalloc_exec
66658 void *vmalloc_exec(unsigned long size)
66659 {
66660 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66661 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66662 -1, __builtin_return_address(0));
66663 }
66664
66665 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66666 * Allocate enough 32bit PA addressable pages to cover @size from the
66667 * page level allocator and map them into contiguous kernel virtual space.
66668 */
66669 +#undef vmalloc_32
66670 void *vmalloc_32(unsigned long size)
66671 {
66672 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66673 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66674 * The resulting memory area is 32bit addressable and zeroed so it can be
66675 * mapped to userspace without leaking data.
66676 */
66677 +#undef vmalloc_32_user
66678 void *vmalloc_32_user(unsigned long size)
66679 {
66680 struct vm_struct *area;
66681 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66682 unsigned long uaddr = vma->vm_start;
66683 unsigned long usize = vma->vm_end - vma->vm_start;
66684
66685 + BUG_ON(vma->vm_mirror);
66686 +
66687 if ((PAGE_SIZE-1) & (unsigned long)addr)
66688 return -EINVAL;
66689
66690 diff -urNp linux-2.6.32.41/mm/vmstat.c linux-2.6.32.41/mm/vmstat.c
66691 --- linux-2.6.32.41/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66692 +++ linux-2.6.32.41/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66693 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66694 *
66695 * vm_stat contains the global counters
66696 */
66697 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66698 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66699 EXPORT_SYMBOL(vm_stat);
66700
66701 #ifdef CONFIG_SMP
66702 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66703 v = p->vm_stat_diff[i];
66704 p->vm_stat_diff[i] = 0;
66705 local_irq_restore(flags);
66706 - atomic_long_add(v, &zone->vm_stat[i]);
66707 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66708 global_diff[i] += v;
66709 #ifdef CONFIG_NUMA
66710 /* 3 seconds idle till flush */
66711 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66712
66713 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66714 if (global_diff[i])
66715 - atomic_long_add(global_diff[i], &vm_stat[i]);
66716 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66717 }
66718
66719 #endif
66720 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66721 start_cpu_timer(cpu);
66722 #endif
66723 #ifdef CONFIG_PROC_FS
66724 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66725 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66726 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66727 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66728 + {
66729 + mode_t gr_mode = S_IRUGO;
66730 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66731 + gr_mode = S_IRUSR;
66732 +#endif
66733 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66734 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
66735 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66736 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
66737 +#else
66738 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
66739 +#endif
66740 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
66741 + }
66742 #endif
66743 return 0;
66744 }
66745 diff -urNp linux-2.6.32.41/net/8021q/vlan.c linux-2.6.32.41/net/8021q/vlan.c
66746 --- linux-2.6.32.41/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
66747 +++ linux-2.6.32.41/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
66748 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
66749 err = -EPERM;
66750 if (!capable(CAP_NET_ADMIN))
66751 break;
66752 - if ((args.u.name_type >= 0) &&
66753 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
66754 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
66755 struct vlan_net *vn;
66756
66757 vn = net_generic(net, vlan_net_id);
66758 diff -urNp linux-2.6.32.41/net/atm/atm_misc.c linux-2.6.32.41/net/atm/atm_misc.c
66759 --- linux-2.6.32.41/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
66760 +++ linux-2.6.32.41/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
66761 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
66762 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
66763 return 1;
66764 atm_return(vcc,truesize);
66765 - atomic_inc(&vcc->stats->rx_drop);
66766 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66767 return 0;
66768 }
66769
66770 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
66771 }
66772 }
66773 atm_return(vcc,guess);
66774 - atomic_inc(&vcc->stats->rx_drop);
66775 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66776 return NULL;
66777 }
66778
66779 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
66780
66781 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66782 {
66783 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66784 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66785 __SONET_ITEMS
66786 #undef __HANDLE_ITEM
66787 }
66788 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
66789
66790 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66791 {
66792 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
66793 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
66794 __SONET_ITEMS
66795 #undef __HANDLE_ITEM
66796 }
66797 diff -urNp linux-2.6.32.41/net/atm/mpoa_caches.c linux-2.6.32.41/net/atm/mpoa_caches.c
66798 --- linux-2.6.32.41/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
66799 +++ linux-2.6.32.41/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
66800 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
66801 struct timeval now;
66802 struct k_message msg;
66803
66804 + pax_track_stack();
66805 +
66806 do_gettimeofday(&now);
66807
66808 write_lock_irq(&client->egress_lock);
66809 diff -urNp linux-2.6.32.41/net/atm/proc.c linux-2.6.32.41/net/atm/proc.c
66810 --- linux-2.6.32.41/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
66811 +++ linux-2.6.32.41/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
66812 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
66813 const struct k_atm_aal_stats *stats)
66814 {
66815 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
66816 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
66817 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
66818 - atomic_read(&stats->rx_drop));
66819 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
66820 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
66821 + atomic_read_unchecked(&stats->rx_drop));
66822 }
66823
66824 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
66825 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
66826 {
66827 struct sock *sk = sk_atm(vcc);
66828
66829 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66830 + seq_printf(seq, "%p ", NULL);
66831 +#else
66832 seq_printf(seq, "%p ", vcc);
66833 +#endif
66834 +
66835 if (!vcc->dev)
66836 seq_printf(seq, "Unassigned ");
66837 else
66838 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
66839 {
66840 if (!vcc->dev)
66841 seq_printf(seq, sizeof(void *) == 4 ?
66842 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66843 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
66844 +#else
66845 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
66846 +#endif
66847 else
66848 seq_printf(seq, "%3d %3d %5d ",
66849 vcc->dev->number, vcc->vpi, vcc->vci);
66850 diff -urNp linux-2.6.32.41/net/atm/resources.c linux-2.6.32.41/net/atm/resources.c
66851 --- linux-2.6.32.41/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
66852 +++ linux-2.6.32.41/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
66853 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
66854 static void copy_aal_stats(struct k_atm_aal_stats *from,
66855 struct atm_aal_stats *to)
66856 {
66857 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66858 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66859 __AAL_STAT_ITEMS
66860 #undef __HANDLE_ITEM
66861 }
66862 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
66863 static void subtract_aal_stats(struct k_atm_aal_stats *from,
66864 struct atm_aal_stats *to)
66865 {
66866 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
66867 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
66868 __AAL_STAT_ITEMS
66869 #undef __HANDLE_ITEM
66870 }
66871 diff -urNp linux-2.6.32.41/net/bridge/br_private.h linux-2.6.32.41/net/bridge/br_private.h
66872 --- linux-2.6.32.41/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
66873 +++ linux-2.6.32.41/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
66874 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
66875
66876 #ifdef CONFIG_SYSFS
66877 /* br_sysfs_if.c */
66878 -extern struct sysfs_ops brport_sysfs_ops;
66879 +extern const struct sysfs_ops brport_sysfs_ops;
66880 extern int br_sysfs_addif(struct net_bridge_port *p);
66881
66882 /* br_sysfs_br.c */
66883 diff -urNp linux-2.6.32.41/net/bridge/br_stp_if.c linux-2.6.32.41/net/bridge/br_stp_if.c
66884 --- linux-2.6.32.41/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
66885 +++ linux-2.6.32.41/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
66886 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
66887 char *envp[] = { NULL };
66888
66889 if (br->stp_enabled == BR_USER_STP) {
66890 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
66891 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
66892 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
66893 br->dev->name, r);
66894
66895 diff -urNp linux-2.6.32.41/net/bridge/br_sysfs_if.c linux-2.6.32.41/net/bridge/br_sysfs_if.c
66896 --- linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
66897 +++ linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
66898 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
66899 return ret;
66900 }
66901
66902 -struct sysfs_ops brport_sysfs_ops = {
66903 +const struct sysfs_ops brport_sysfs_ops = {
66904 .show = brport_show,
66905 .store = brport_store,
66906 };
66907 diff -urNp linux-2.6.32.41/net/bridge/netfilter/ebtables.c linux-2.6.32.41/net/bridge/netfilter/ebtables.c
66908 --- linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
66909 +++ linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
66910 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
66911 unsigned int entries_size, nentries;
66912 char *entries;
66913
66914 + pax_track_stack();
66915 +
66916 if (cmd == EBT_SO_GET_ENTRIES) {
66917 entries_size = t->private->entries_size;
66918 nentries = t->private->nentries;
66919 diff -urNp linux-2.6.32.41/net/can/bcm.c linux-2.6.32.41/net/can/bcm.c
66920 --- linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
66921 +++ linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
66922 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
66923 struct bcm_sock *bo = bcm_sk(sk);
66924 struct bcm_op *op;
66925
66926 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66927 + seq_printf(m, ">>> socket %p", NULL);
66928 + seq_printf(m, " / sk %p", NULL);
66929 + seq_printf(m, " / bo %p", NULL);
66930 +#else
66931 seq_printf(m, ">>> socket %p", sk->sk_socket);
66932 seq_printf(m, " / sk %p", sk);
66933 seq_printf(m, " / bo %p", bo);
66934 +#endif
66935 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
66936 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
66937 seq_printf(m, " <<<\n");
66938 diff -urNp linux-2.6.32.41/net/core/dev.c linux-2.6.32.41/net/core/dev.c
66939 --- linux-2.6.32.41/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
66940 +++ linux-2.6.32.41/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
66941 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
66942 if (no_module && capable(CAP_NET_ADMIN))
66943 no_module = request_module("netdev-%s", name);
66944 if (no_module && capable(CAP_SYS_MODULE)) {
66945 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66946 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
66947 +#else
66948 if (!request_module("%s", name))
66949 pr_err("Loading kernel module for a network device "
66950 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
66951 "instead\n", name);
66952 +#endif
66953 }
66954 }
66955 EXPORT_SYMBOL(dev_load);
66956 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
66957 }
66958 EXPORT_SYMBOL(netif_rx_ni);
66959
66960 -static void net_tx_action(struct softirq_action *h)
66961 +static void net_tx_action(void)
66962 {
66963 struct softnet_data *sd = &__get_cpu_var(softnet_data);
66964
66965 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
66966 EXPORT_SYMBOL(netif_napi_del);
66967
66968
66969 -static void net_rx_action(struct softirq_action *h)
66970 +static void net_rx_action(void)
66971 {
66972 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
66973 unsigned long time_limit = jiffies + 2;
66974 diff -urNp linux-2.6.32.41/net/core/flow.c linux-2.6.32.41/net/core/flow.c
66975 --- linux-2.6.32.41/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
66976 +++ linux-2.6.32.41/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
66977 @@ -35,11 +35,11 @@ struct flow_cache_entry {
66978 atomic_t *object_ref;
66979 };
66980
66981 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
66982 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
66983
66984 static u32 flow_hash_shift;
66985 #define flow_hash_size (1 << flow_hash_shift)
66986 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
66987 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
66988
66989 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
66990
66991 @@ -52,7 +52,7 @@ struct flow_percpu_info {
66992 u32 hash_rnd;
66993 int count;
66994 };
66995 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
66996 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
66997
66998 #define flow_hash_rnd_recalc(cpu) \
66999 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67000 @@ -69,7 +69,7 @@ struct flow_flush_info {
67001 atomic_t cpuleft;
67002 struct completion completion;
67003 };
67004 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67005 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67006
67007 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67008
67009 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67010 if (fle->family == family &&
67011 fle->dir == dir &&
67012 flow_key_compare(key, &fle->key) == 0) {
67013 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67014 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67015 void *ret = fle->object;
67016
67017 if (ret)
67018 @@ -228,7 +228,7 @@ nocache:
67019 err = resolver(net, key, family, dir, &obj, &obj_ref);
67020
67021 if (fle && !err) {
67022 - fle->genid = atomic_read(&flow_cache_genid);
67023 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67024
67025 if (fle->object)
67026 atomic_dec(fle->object_ref);
67027 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67028
67029 fle = flow_table(cpu)[i];
67030 for (; fle; fle = fle->next) {
67031 - unsigned genid = atomic_read(&flow_cache_genid);
67032 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67033
67034 if (!fle->object || fle->genid == genid)
67035 continue;
67036 diff -urNp linux-2.6.32.41/net/core/skbuff.c linux-2.6.32.41/net/core/skbuff.c
67037 --- linux-2.6.32.41/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67038 +++ linux-2.6.32.41/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67039 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67040 struct sk_buff *frag_iter;
67041 struct sock *sk = skb->sk;
67042
67043 + pax_track_stack();
67044 +
67045 /*
67046 * __skb_splice_bits() only fails if the output has no room left,
67047 * so no point in going over the frag_list for the error case.
67048 diff -urNp linux-2.6.32.41/net/core/sock.c linux-2.6.32.41/net/core/sock.c
67049 --- linux-2.6.32.41/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67050 +++ linux-2.6.32.41/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67051 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67052 break;
67053
67054 case SO_PEERCRED:
67055 + {
67056 + struct ucred peercred;
67057 if (len > sizeof(sk->sk_peercred))
67058 len = sizeof(sk->sk_peercred);
67059 - if (copy_to_user(optval, &sk->sk_peercred, len))
67060 + peercred = sk->sk_peercred;
67061 + if (copy_to_user(optval, &peercred, len))
67062 return -EFAULT;
67063 goto lenout;
67064 + }
67065
67066 case SO_PEERNAME:
67067 {
67068 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67069 */
67070 smp_wmb();
67071 atomic_set(&sk->sk_refcnt, 1);
67072 - atomic_set(&sk->sk_drops, 0);
67073 + atomic_set_unchecked(&sk->sk_drops, 0);
67074 }
67075 EXPORT_SYMBOL(sock_init_data);
67076
67077 diff -urNp linux-2.6.32.41/net/decnet/sysctl_net_decnet.c linux-2.6.32.41/net/decnet/sysctl_net_decnet.c
67078 --- linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67079 +++ linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67080 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67081
67082 if (len > *lenp) len = *lenp;
67083
67084 - if (copy_to_user(buffer, addr, len))
67085 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67086 return -EFAULT;
67087
67088 *lenp = len;
67089 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67090
67091 if (len > *lenp) len = *lenp;
67092
67093 - if (copy_to_user(buffer, devname, len))
67094 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67095 return -EFAULT;
67096
67097 *lenp = len;
67098 diff -urNp linux-2.6.32.41/net/econet/Kconfig linux-2.6.32.41/net/econet/Kconfig
67099 --- linux-2.6.32.41/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67100 +++ linux-2.6.32.41/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67101 @@ -4,7 +4,7 @@
67102
67103 config ECONET
67104 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67105 - depends on EXPERIMENTAL && INET
67106 + depends on EXPERIMENTAL && INET && BROKEN
67107 ---help---
67108 Econet is a fairly old and slow networking protocol mainly used by
67109 Acorn computers to access file and print servers. It uses native
67110 diff -urNp linux-2.6.32.41/net/ieee802154/dgram.c linux-2.6.32.41/net/ieee802154/dgram.c
67111 --- linux-2.6.32.41/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67112 +++ linux-2.6.32.41/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67113 @@ -318,7 +318,7 @@ out:
67114 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67115 {
67116 if (sock_queue_rcv_skb(sk, skb) < 0) {
67117 - atomic_inc(&sk->sk_drops);
67118 + atomic_inc_unchecked(&sk->sk_drops);
67119 kfree_skb(skb);
67120 return NET_RX_DROP;
67121 }
67122 diff -urNp linux-2.6.32.41/net/ieee802154/raw.c linux-2.6.32.41/net/ieee802154/raw.c
67123 --- linux-2.6.32.41/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67124 +++ linux-2.6.32.41/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67125 @@ -206,7 +206,7 @@ out:
67126 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67127 {
67128 if (sock_queue_rcv_skb(sk, skb) < 0) {
67129 - atomic_inc(&sk->sk_drops);
67130 + atomic_inc_unchecked(&sk->sk_drops);
67131 kfree_skb(skb);
67132 return NET_RX_DROP;
67133 }
67134 diff -urNp linux-2.6.32.41/net/ipv4/inet_diag.c linux-2.6.32.41/net/ipv4/inet_diag.c
67135 --- linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67136 +++ linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:04:18.000000000 -0400
67137 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67138 r->idiag_retrans = 0;
67139
67140 r->id.idiag_if = sk->sk_bound_dev_if;
67141 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67142 + r->id.idiag_cookie[0] = 0;
67143 + r->id.idiag_cookie[1] = 0;
67144 +#else
67145 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67146 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67147 +#endif
67148
67149 r->id.idiag_sport = inet->sport;
67150 r->id.idiag_dport = inet->dport;
67151 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67152 r->idiag_family = tw->tw_family;
67153 r->idiag_retrans = 0;
67154 r->id.idiag_if = tw->tw_bound_dev_if;
67155 +
67156 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67157 + r->id.idiag_cookie[0] = 0;
67158 + r->id.idiag_cookie[1] = 0;
67159 +#else
67160 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67161 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67162 +#endif
67163 +
67164 r->id.idiag_sport = tw->tw_sport;
67165 r->id.idiag_dport = tw->tw_dport;
67166 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67167 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67168 if (sk == NULL)
67169 goto unlock;
67170
67171 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67172 err = -ESTALE;
67173 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67174 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67175 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67176 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67177 goto out;
67178 +#endif
67179
67180 err = -ENOMEM;
67181 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67182 @@ -581,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
67183 r->idiag_retrans = req->retrans;
67184
67185 r->id.idiag_if = sk->sk_bound_dev_if;
67186 +
67187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67188 + r->id.idiag_cookie[0] = 0;
67189 + r->id.idiag_cookie[1] = 0;
67190 +#else
67191 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67192 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67193 +#endif
67194
67195 tmo = req->expires - jiffies;
67196 if (tmo < 0)
67197 diff -urNp linux-2.6.32.41/net/ipv4/inet_hashtables.c linux-2.6.32.41/net/ipv4/inet_hashtables.c
67198 --- linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67199 +++ linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67200 @@ -18,11 +18,14 @@
67201 #include <linux/sched.h>
67202 #include <linux/slab.h>
67203 #include <linux/wait.h>
67204 +#include <linux/security.h>
67205
67206 #include <net/inet_connection_sock.h>
67207 #include <net/inet_hashtables.h>
67208 #include <net/ip.h>
67209
67210 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67211 +
67212 /*
67213 * Allocate and initialize a new local port bind bucket.
67214 * The bindhash mutex for snum's hash chain must be held here.
67215 @@ -490,6 +493,8 @@ ok:
67216 }
67217 spin_unlock(&head->lock);
67218
67219 + gr_update_task_in_ip_table(current, inet_sk(sk));
67220 +
67221 if (tw) {
67222 inet_twsk_deschedule(tw, death_row);
67223 inet_twsk_put(tw);
67224 diff -urNp linux-2.6.32.41/net/ipv4/inetpeer.c linux-2.6.32.41/net/ipv4/inetpeer.c
67225 --- linux-2.6.32.41/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67226 +++ linux-2.6.32.41/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67227 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67228 struct inet_peer *p, *n;
67229 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67230
67231 + pax_track_stack();
67232 +
67233 /* Look up for the address quickly. */
67234 read_lock_bh(&peer_pool_lock);
67235 p = lookup(daddr, NULL);
67236 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67237 return NULL;
67238 n->v4daddr = daddr;
67239 atomic_set(&n->refcnt, 1);
67240 - atomic_set(&n->rid, 0);
67241 + atomic_set_unchecked(&n->rid, 0);
67242 n->ip_id_count = secure_ip_id(daddr);
67243 n->tcp_ts_stamp = 0;
67244
67245 diff -urNp linux-2.6.32.41/net/ipv4/ip_fragment.c linux-2.6.32.41/net/ipv4/ip_fragment.c
67246 --- linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67247 +++ linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67248 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67249 return 0;
67250
67251 start = qp->rid;
67252 - end = atomic_inc_return(&peer->rid);
67253 + end = atomic_inc_return_unchecked(&peer->rid);
67254 qp->rid = end;
67255
67256 rc = qp->q.fragments && (end - start) > max;
67257 diff -urNp linux-2.6.32.41/net/ipv4/ip_sockglue.c linux-2.6.32.41/net/ipv4/ip_sockglue.c
67258 --- linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67259 +++ linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67260 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67261 int val;
67262 int len;
67263
67264 + pax_track_stack();
67265 +
67266 if (level != SOL_IP)
67267 return -EOPNOTSUPP;
67268
67269 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c
67270 --- linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67271 +++ linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67272 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67273 private = &tmp;
67274 }
67275 #endif
67276 + memset(&info, 0, sizeof(info));
67277 info.valid_hooks = t->valid_hooks;
67278 memcpy(info.hook_entry, private->hook_entry,
67279 sizeof(info.hook_entry));
67280 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c
67281 --- linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67282 +++ linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67283 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67284 private = &tmp;
67285 }
67286 #endif
67287 + memset(&info, 0, sizeof(info));
67288 info.valid_hooks = t->valid_hooks;
67289 memcpy(info.hook_entry, private->hook_entry,
67290 sizeof(info.hook_entry));
67291 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c
67292 --- linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67293 +++ linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67294 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67295
67296 *len = 0;
67297
67298 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67299 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67300 if (*octets == NULL) {
67301 if (net_ratelimit())
67302 printk("OOM in bsalg (%d)\n", __LINE__);
67303 diff -urNp linux-2.6.32.41/net/ipv4/raw.c linux-2.6.32.41/net/ipv4/raw.c
67304 --- linux-2.6.32.41/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67305 +++ linux-2.6.32.41/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67306 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67307 /* Charge it to the socket. */
67308
67309 if (sock_queue_rcv_skb(sk, skb) < 0) {
67310 - atomic_inc(&sk->sk_drops);
67311 + atomic_inc_unchecked(&sk->sk_drops);
67312 kfree_skb(skb);
67313 return NET_RX_DROP;
67314 }
67315 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67316 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67317 {
67318 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67319 - atomic_inc(&sk->sk_drops);
67320 + atomic_inc_unchecked(&sk->sk_drops);
67321 kfree_skb(skb);
67322 return NET_RX_DROP;
67323 }
67324 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67325
67326 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67327 {
67328 + struct icmp_filter filter;
67329 +
67330 + if (optlen < 0)
67331 + return -EINVAL;
67332 if (optlen > sizeof(struct icmp_filter))
67333 optlen = sizeof(struct icmp_filter);
67334 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67335 + if (copy_from_user(&filter, optval, optlen))
67336 return -EFAULT;
67337 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67338 +
67339 return 0;
67340 }
67341
67342 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67343 {
67344 + struct icmp_filter filter;
67345 int len, ret = -EFAULT;
67346
67347 if (get_user(len, optlen))
67348 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67349 if (len > sizeof(struct icmp_filter))
67350 len = sizeof(struct icmp_filter);
67351 ret = -EFAULT;
67352 + memcpy(&filter, &raw_sk(sk)->filter, len);
67353 if (put_user(len, optlen) ||
67354 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67355 + copy_to_user(optval, &filter, len))
67356 goto out;
67357 ret = 0;
67358 out: return ret;
67359 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67360 sk_wmem_alloc_get(sp),
67361 sk_rmem_alloc_get(sp),
67362 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67363 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67364 + atomic_read(&sp->sk_refcnt),
67365 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67366 + NULL,
67367 +#else
67368 + sp,
67369 +#endif
67370 + atomic_read_unchecked(&sp->sk_drops));
67371 }
67372
67373 static int raw_seq_show(struct seq_file *seq, void *v)
67374 diff -urNp linux-2.6.32.41/net/ipv4/route.c linux-2.6.32.41/net/ipv4/route.c
67375 --- linux-2.6.32.41/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67376 +++ linux-2.6.32.41/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67377 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67378
67379 static inline int rt_genid(struct net *net)
67380 {
67381 - return atomic_read(&net->ipv4.rt_genid);
67382 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67383 }
67384
67385 #ifdef CONFIG_PROC_FS
67386 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67387 unsigned char shuffle;
67388
67389 get_random_bytes(&shuffle, sizeof(shuffle));
67390 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67391 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67392 }
67393
67394 /*
67395 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67396
67397 static __net_init int rt_secret_timer_init(struct net *net)
67398 {
67399 - atomic_set(&net->ipv4.rt_genid,
67400 + atomic_set_unchecked(&net->ipv4.rt_genid,
67401 (int) ((num_physpages ^ (num_physpages>>8)) ^
67402 (jiffies ^ (jiffies >> 7))));
67403
67404 diff -urNp linux-2.6.32.41/net/ipv4/tcp.c linux-2.6.32.41/net/ipv4/tcp.c
67405 --- linux-2.6.32.41/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67406 +++ linux-2.6.32.41/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67407 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67408 int val;
67409 int err = 0;
67410
67411 + pax_track_stack();
67412 +
67413 /* This is a string value all the others are int's */
67414 if (optname == TCP_CONGESTION) {
67415 char name[TCP_CA_NAME_MAX];
67416 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67417 struct tcp_sock *tp = tcp_sk(sk);
67418 int val, len;
67419
67420 + pax_track_stack();
67421 +
67422 if (get_user(len, optlen))
67423 return -EFAULT;
67424
67425 diff -urNp linux-2.6.32.41/net/ipv4/tcp_ipv4.c linux-2.6.32.41/net/ipv4/tcp_ipv4.c
67426 --- linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67427 +++ linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67428 @@ -84,6 +84,9 @@
67429 int sysctl_tcp_tw_reuse __read_mostly;
67430 int sysctl_tcp_low_latency __read_mostly;
67431
67432 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67433 +extern int grsec_enable_blackhole;
67434 +#endif
67435
67436 #ifdef CONFIG_TCP_MD5SIG
67437 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67438 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67439 return 0;
67440
67441 reset:
67442 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67443 + if (!grsec_enable_blackhole)
67444 +#endif
67445 tcp_v4_send_reset(rsk, skb);
67446 discard:
67447 kfree_skb(skb);
67448 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67449 TCP_SKB_CB(skb)->sacked = 0;
67450
67451 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67452 - if (!sk)
67453 + if (!sk) {
67454 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67455 + ret = 1;
67456 +#endif
67457 goto no_tcp_socket;
67458 + }
67459
67460 process:
67461 - if (sk->sk_state == TCP_TIME_WAIT)
67462 + if (sk->sk_state == TCP_TIME_WAIT) {
67463 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67464 + ret = 2;
67465 +#endif
67466 goto do_time_wait;
67467 + }
67468
67469 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67470 goto discard_and_relse;
67471 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67472 bad_packet:
67473 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67474 } else {
67475 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67476 + if (!grsec_enable_blackhole || (ret == 1 &&
67477 + (skb->dev->flags & IFF_LOOPBACK)))
67478 +#endif
67479 tcp_v4_send_reset(NULL, skb);
67480 }
67481
67482 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67483 0, /* non standard timer */
67484 0, /* open_requests have no inode */
67485 atomic_read(&sk->sk_refcnt),
67486 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67487 + NULL,
67488 +#else
67489 req,
67490 +#endif
67491 len);
67492 }
67493
67494 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67495 sock_i_uid(sk),
67496 icsk->icsk_probes_out,
67497 sock_i_ino(sk),
67498 - atomic_read(&sk->sk_refcnt), sk,
67499 + atomic_read(&sk->sk_refcnt),
67500 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67501 + NULL,
67502 +#else
67503 + sk,
67504 +#endif
67505 jiffies_to_clock_t(icsk->icsk_rto),
67506 jiffies_to_clock_t(icsk->icsk_ack.ato),
67507 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67508 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67509 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67510 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67511 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67512 - atomic_read(&tw->tw_refcnt), tw, len);
67513 + atomic_read(&tw->tw_refcnt),
67514 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67515 + NULL,
67516 +#else
67517 + tw,
67518 +#endif
67519 + len);
67520 }
67521
67522 #define TMPSZ 150
67523 diff -urNp linux-2.6.32.41/net/ipv4/tcp_minisocks.c linux-2.6.32.41/net/ipv4/tcp_minisocks.c
67524 --- linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67525 +++ linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67526 @@ -26,6 +26,10 @@
67527 #include <net/inet_common.h>
67528 #include <net/xfrm.h>
67529
67530 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67531 +extern int grsec_enable_blackhole;
67532 +#endif
67533 +
67534 #ifdef CONFIG_SYSCTL
67535 #define SYNC_INIT 0 /* let the user enable it */
67536 #else
67537 @@ -672,6 +676,10 @@ listen_overflow:
67538
67539 embryonic_reset:
67540 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67541 +
67542 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67543 + if (!grsec_enable_blackhole)
67544 +#endif
67545 if (!(flg & TCP_FLAG_RST))
67546 req->rsk_ops->send_reset(sk, skb);
67547
67548 diff -urNp linux-2.6.32.41/net/ipv4/tcp_output.c linux-2.6.32.41/net/ipv4/tcp_output.c
67549 --- linux-2.6.32.41/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67550 +++ linux-2.6.32.41/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67551 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67552 __u8 *md5_hash_location;
67553 int mss;
67554
67555 + pax_track_stack();
67556 +
67557 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67558 if (skb == NULL)
67559 return NULL;
67560 diff -urNp linux-2.6.32.41/net/ipv4/tcp_probe.c linux-2.6.32.41/net/ipv4/tcp_probe.c
67561 --- linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67562 +++ linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67563 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67564 if (cnt + width >= len)
67565 break;
67566
67567 - if (copy_to_user(buf + cnt, tbuf, width))
67568 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67569 return -EFAULT;
67570 cnt += width;
67571 }
67572 diff -urNp linux-2.6.32.41/net/ipv4/tcp_timer.c linux-2.6.32.41/net/ipv4/tcp_timer.c
67573 --- linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67574 +++ linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67575 @@ -21,6 +21,10 @@
67576 #include <linux/module.h>
67577 #include <net/tcp.h>
67578
67579 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67580 +extern int grsec_lastack_retries;
67581 +#endif
67582 +
67583 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67584 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67585 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67586 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67587 }
67588 }
67589
67590 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67591 + if ((sk->sk_state == TCP_LAST_ACK) &&
67592 + (grsec_lastack_retries > 0) &&
67593 + (grsec_lastack_retries < retry_until))
67594 + retry_until = grsec_lastack_retries;
67595 +#endif
67596 +
67597 if (retransmits_timed_out(sk, retry_until)) {
67598 /* Has it gone just too far? */
67599 tcp_write_err(sk);
67600 diff -urNp linux-2.6.32.41/net/ipv4/udp.c linux-2.6.32.41/net/ipv4/udp.c
67601 --- linux-2.6.32.41/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67602 +++ linux-2.6.32.41/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67603 @@ -86,6 +86,7 @@
67604 #include <linux/types.h>
67605 #include <linux/fcntl.h>
67606 #include <linux/module.h>
67607 +#include <linux/security.h>
67608 #include <linux/socket.h>
67609 #include <linux/sockios.h>
67610 #include <linux/igmp.h>
67611 @@ -106,6 +107,10 @@
67612 #include <net/xfrm.h>
67613 #include "udp_impl.h"
67614
67615 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67616 +extern int grsec_enable_blackhole;
67617 +#endif
67618 +
67619 struct udp_table udp_table;
67620 EXPORT_SYMBOL(udp_table);
67621
67622 @@ -371,6 +376,9 @@ found:
67623 return s;
67624 }
67625
67626 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67627 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67628 +
67629 /*
67630 * This routine is called by the ICMP module when it gets some
67631 * sort of error condition. If err < 0 then the socket should
67632 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67633 dport = usin->sin_port;
67634 if (dport == 0)
67635 return -EINVAL;
67636 +
67637 + err = gr_search_udp_sendmsg(sk, usin);
67638 + if (err)
67639 + return err;
67640 } else {
67641 if (sk->sk_state != TCP_ESTABLISHED)
67642 return -EDESTADDRREQ;
67643 +
67644 + err = gr_search_udp_sendmsg(sk, NULL);
67645 + if (err)
67646 + return err;
67647 +
67648 daddr = inet->daddr;
67649 dport = inet->dport;
67650 /* Open fast path for connected socket.
67651 @@ -945,6 +962,10 @@ try_again:
67652 if (!skb)
67653 goto out;
67654
67655 + err = gr_search_udp_recvmsg(sk, skb);
67656 + if (err)
67657 + goto out_free;
67658 +
67659 ulen = skb->len - sizeof(struct udphdr);
67660 copied = len;
67661 if (copied > ulen)
67662 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
67663 if (rc == -ENOMEM) {
67664 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
67665 is_udplite);
67666 - atomic_inc(&sk->sk_drops);
67667 + atomic_inc_unchecked(&sk->sk_drops);
67668 }
67669 goto drop;
67670 }
67671 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
67672 goto csum_error;
67673
67674 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
67675 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67676 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
67677 +#endif
67678 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
67679
67680 /*
67681 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
67682 sk_wmem_alloc_get(sp),
67683 sk_rmem_alloc_get(sp),
67684 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67685 - atomic_read(&sp->sk_refcnt), sp,
67686 - atomic_read(&sp->sk_drops), len);
67687 + atomic_read(&sp->sk_refcnt),
67688 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67689 + NULL,
67690 +#else
67691 + sp,
67692 +#endif
67693 + atomic_read_unchecked(&sp->sk_drops), len);
67694 }
67695
67696 int udp4_seq_show(struct seq_file *seq, void *v)
67697 diff -urNp linux-2.6.32.41/net/ipv6/inet6_connection_sock.c linux-2.6.32.41/net/ipv6/inet6_connection_sock.c
67698 --- linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
67699 +++ linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
67700 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
67701 #ifdef CONFIG_XFRM
67702 {
67703 struct rt6_info *rt = (struct rt6_info *)dst;
67704 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
67705 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
67706 }
67707 #endif
67708 }
67709 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
67710 #ifdef CONFIG_XFRM
67711 if (dst) {
67712 struct rt6_info *rt = (struct rt6_info *)dst;
67713 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
67714 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
67715 sk->sk_dst_cache = NULL;
67716 dst_release(dst);
67717 dst = NULL;
67718 diff -urNp linux-2.6.32.41/net/ipv6/inet6_hashtables.c linux-2.6.32.41/net/ipv6/inet6_hashtables.c
67719 --- linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67720 +++ linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
67721 @@ -118,7 +118,7 @@ out:
67722 }
67723 EXPORT_SYMBOL(__inet6_lookup_established);
67724
67725 -static int inline compute_score(struct sock *sk, struct net *net,
67726 +static inline int compute_score(struct sock *sk, struct net *net,
67727 const unsigned short hnum,
67728 const struct in6_addr *daddr,
67729 const int dif)
67730 diff -urNp linux-2.6.32.41/net/ipv6/ipv6_sockglue.c linux-2.6.32.41/net/ipv6/ipv6_sockglue.c
67731 --- linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67732 +++ linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67733 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
67734 int val, valbool;
67735 int retv = -ENOPROTOOPT;
67736
67737 + pax_track_stack();
67738 +
67739 if (optval == NULL)
67740 val=0;
67741 else {
67742 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
67743 int len;
67744 int val;
67745
67746 + pax_track_stack();
67747 +
67748 if (ip6_mroute_opt(optname))
67749 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
67750
67751 diff -urNp linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c
67752 --- linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
67753 +++ linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
67754 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
67755 private = &tmp;
67756 }
67757 #endif
67758 + memset(&info, 0, sizeof(info));
67759 info.valid_hooks = t->valid_hooks;
67760 memcpy(info.hook_entry, private->hook_entry,
67761 sizeof(info.hook_entry));
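
The single line added to get_info() above zeroes the stack-resident info structure before its fields are filled in and copied back to userspace, so structure padding and any members the code never sets cannot leak stale kernel stack contents. A userspace sketch of the same discipline follows; the structure layout is made up, not the kernel's getinfo structure.

#include <stdio.h>
#include <string.h>

struct table_info_demo {
	unsigned int valid_hooks;
	char name[32];		/* only partly written below */
};

static void fill_info(struct table_info_demo *out)
{
	memset(out, 0, sizeof(*out));	/* the line the patch adds */
	out->valid_hooks = 0x1f;
	strncpy(out->name, "filter", sizeof(out->name) - 1);
}

int main(void)
{
	struct table_info_demo info;

	fill_info(&info);
	/* In the kernel the next step would be copy_to_user(); thanks to the
	 * memset, the bytes after "filter" are guaranteed to be zero. */
	printf("%s %#x\n", info.name, info.valid_hooks);
	return 0;
}
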
67762 diff -urNp linux-2.6.32.41/net/ipv6/raw.c linux-2.6.32.41/net/ipv6/raw.c
67763 --- linux-2.6.32.41/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
67764 +++ linux-2.6.32.41/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
67765 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
67766 {
67767 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
67768 skb_checksum_complete(skb)) {
67769 - atomic_inc(&sk->sk_drops);
67770 + atomic_inc_unchecked(&sk->sk_drops);
67771 kfree_skb(skb);
67772 return NET_RX_DROP;
67773 }
67774
67775 /* Charge it to the socket. */
67776 if (sock_queue_rcv_skb(sk,skb)<0) {
67777 - atomic_inc(&sk->sk_drops);
67778 + atomic_inc_unchecked(&sk->sk_drops);
67779 kfree_skb(skb);
67780 return NET_RX_DROP;
67781 }
67782 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67783 struct raw6_sock *rp = raw6_sk(sk);
67784
67785 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
67786 - atomic_inc(&sk->sk_drops);
67787 + atomic_inc_unchecked(&sk->sk_drops);
67788 kfree_skb(skb);
67789 return NET_RX_DROP;
67790 }
67791 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67792
67793 if (inet->hdrincl) {
67794 if (skb_checksum_complete(skb)) {
67795 - atomic_inc(&sk->sk_drops);
67796 + atomic_inc_unchecked(&sk->sk_drops);
67797 kfree_skb(skb);
67798 return NET_RX_DROP;
67799 }
67800 @@ -518,7 +518,7 @@ csum_copy_err:
67801 as some normal condition.
67802 */
67803 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
67804 - atomic_inc(&sk->sk_drops);
67805 + atomic_inc_unchecked(&sk->sk_drops);
67806 goto out;
67807 }
67808
67809 @@ -600,7 +600,7 @@ out:
67810 return err;
67811 }
67812
67813 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
67814 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
67815 struct flowi *fl, struct rt6_info *rt,
67816 unsigned int flags)
67817 {
67818 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
67819 u16 proto;
67820 int err;
67821
67822 + pax_track_stack();
67823 +
67824 /* Rough check on arithmetic overflow,
67825 better check is made in ip6_append_data().
67826 */
67827 @@ -916,12 +918,17 @@ do_confirm:
67828 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
67829 char __user *optval, int optlen)
67830 {
67831 + struct icmp6_filter filter;
67832 +
67833 switch (optname) {
67834 case ICMPV6_FILTER:
67835 + if (optlen < 0)
67836 + return -EINVAL;
67837 if (optlen > sizeof(struct icmp6_filter))
67838 optlen = sizeof(struct icmp6_filter);
67839 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
67840 + if (copy_from_user(&filter, optval, optlen))
67841 return -EFAULT;
67842 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
67843 return 0;
67844 default:
67845 return -ENOPROTOOPT;
67846 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
67847 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
67848 char __user *optval, int __user *optlen)
67849 {
67850 + struct icmp6_filter filter;
67851 int len;
67852
67853 switch (optname) {
67854 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
67855 len = sizeof(struct icmp6_filter);
67856 if (put_user(len, optlen))
67857 return -EFAULT;
67858 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
67859 + memcpy(&filter, &raw6_sk(sk)->filter, len);
67860 + if (copy_to_user(optval, &filter, len))
67861 return -EFAULT;
67862 return 0;
67863 default:
67864 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
67865 0, 0L, 0,
67866 sock_i_uid(sp), 0,
67867 sock_i_ino(sp),
67868 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67869 + atomic_read(&sp->sk_refcnt),
67870 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67871 + NULL,
67872 +#else
67873 + sp,
67874 +#endif
67875 + atomic_read_unchecked(&sp->sk_drops));
67876 }
67877
67878 static int raw6_seq_show(struct seq_file *seq, void *v)
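
Besides the sk_drops and seq-file changes, the raw.c hunks above rework rawv6_seticmpfilter()/rawv6_geticmpfilter(): a negative optlen is rejected, the length is clamped, and the user copy goes through a stack temporary that is committed to the socket's filter only after the whole copy has succeeded. The userspace sketch below mirrors that order of operations; the struct and function names are illustrative and memcpy stands in for copy_from_user().

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct icmp6_filter_demo {		/* stand-in for the ICMPv6 filter */
	unsigned int data[8];
};

static struct icmp6_filter_demo live_filter;	/* long-lived "socket" state */

static int set_filter(const void *optval, int optlen)
{
	struct icmp6_filter_demo tmp;

	if (optlen < 0)				/* new check in the patch */
		return -EINVAL;
	if ((size_t)optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	/* In the kernel this is copy_from_user() and may fail part-way;
	 * a failure here can no longer leave live_filter half-written. */
	memcpy(&tmp, optval, (size_t)optlen);
	memcpy(&live_filter, &tmp, (size_t)optlen);	/* commit last */
	return 0;
}

int main(void)
{
	struct icmp6_filter_demo f = { { 0xffffffffu } };

	printf("full copy: %d\n", set_filter(&f, (int)sizeof(f)));
	printf("negative length: %d\n", set_filter(&f, -1));
	printf("live_filter.data[0] = %#x\n", live_filter.data[0]);
	return 0;
}
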
67879 diff -urNp linux-2.6.32.41/net/ipv6/tcp_ipv6.c linux-2.6.32.41/net/ipv6/tcp_ipv6.c
67880 --- linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
67881 +++ linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
67882 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
67883 }
67884 #endif
67885
67886 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67887 +extern int grsec_enable_blackhole;
67888 +#endif
67889 +
67890 static void tcp_v6_hash(struct sock *sk)
67891 {
67892 if (sk->sk_state != TCP_CLOSE) {
67893 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
67894 return 0;
67895
67896 reset:
67897 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67898 + if (!grsec_enable_blackhole)
67899 +#endif
67900 tcp_v6_send_reset(sk, skb);
67901 discard:
67902 if (opt_skb)
67903 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
67904 TCP_SKB_CB(skb)->sacked = 0;
67905
67906 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67907 - if (!sk)
67908 + if (!sk) {
67909 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67910 + ret = 1;
67911 +#endif
67912 goto no_tcp_socket;
67913 + }
67914
67915 process:
67916 - if (sk->sk_state == TCP_TIME_WAIT)
67917 + if (sk->sk_state == TCP_TIME_WAIT) {
67918 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67919 + ret = 2;
67920 +#endif
67921 goto do_time_wait;
67922 + }
67923
67924 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
67925 goto discard_and_relse;
67926 @@ -1700,6 +1715,10 @@ no_tcp_socket:
67927 bad_packet:
67928 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67929 } else {
67930 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67931 + if (!grsec_enable_blackhole || (ret == 1 &&
67932 + (skb->dev->flags & IFF_LOOPBACK)))
67933 +#endif
67934 tcp_v6_send_reset(NULL, skb);
67935 }
67936
67937 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
67938 uid,
67939 0, /* non standard timer */
67940 0, /* open_requests have no inode */
67941 - 0, req);
67942 + 0,
67943 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67944 + NULL
67945 +#else
67946 + req
67947 +#endif
67948 + );
67949 }
67950
67951 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
67952 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
67953 sock_i_uid(sp),
67954 icsk->icsk_probes_out,
67955 sock_i_ino(sp),
67956 - atomic_read(&sp->sk_refcnt), sp,
67957 + atomic_read(&sp->sk_refcnt),
67958 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67959 + NULL,
67960 +#else
67961 + sp,
67962 +#endif
67963 jiffies_to_clock_t(icsk->icsk_rto),
67964 jiffies_to_clock_t(icsk->icsk_ack.ato),
67965 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
67966 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
67967 dest->s6_addr32[2], dest->s6_addr32[3], destp,
67968 tw->tw_substate, 0, 0,
67969 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67970 - atomic_read(&tw->tw_refcnt), tw);
67971 + atomic_read(&tw->tw_refcnt),
67972 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67973 + NULL
67974 +#else
67975 + tw
67976 +#endif
67977 + );
67978 }
67979
67980 static int tcp6_seq_show(struct seq_file *seq, void *v)
67981 diff -urNp linux-2.6.32.41/net/ipv6/udp.c linux-2.6.32.41/net/ipv6/udp.c
67982 --- linux-2.6.32.41/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
67983 +++ linux-2.6.32.41/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
67984 @@ -49,6 +49,10 @@
67985 #include <linux/seq_file.h>
67986 #include "udp_impl.h"
67987
67988 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67989 +extern int grsec_enable_blackhole;
67990 +#endif
67991 +
67992 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
67993 {
67994 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
67995 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
67996 if (rc == -ENOMEM) {
67997 UDP6_INC_STATS_BH(sock_net(sk),
67998 UDP_MIB_RCVBUFERRORS, is_udplite);
67999 - atomic_inc(&sk->sk_drops);
68000 + atomic_inc_unchecked(&sk->sk_drops);
68001 }
68002 goto drop;
68003 }
68004 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68005 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68006 proto == IPPROTO_UDPLITE);
68007
68008 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68009 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68010 +#endif
68011 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68012
68013 kfree_skb(skb);
68014 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68015 0, 0L, 0,
68016 sock_i_uid(sp), 0,
68017 sock_i_ino(sp),
68018 - atomic_read(&sp->sk_refcnt), sp,
68019 - atomic_read(&sp->sk_drops));
68020 + atomic_read(&sp->sk_refcnt),
68021 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68022 + NULL,
68023 +#else
68024 + sp,
68025 +#endif
68026 + atomic_read_unchecked(&sp->sk_drops));
68027 }
68028
68029 int udp6_seq_show(struct seq_file *seq, void *v)
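
The seq-file hunks above (udp4_format_sock, raw6_sock_seq_show, the tcp6 show helpers and udp6_sock_seq_show) all substitute NULL for the socket pointer when CONFIG_GRKERNSEC_HIDESYM is enabled, so /proc output no longer discloses kernel object addresses. A compile-time toggle sketch in plain userspace C, with HIDESYM standing in for the config option:

#include <stdio.h>

#define HIDESYM 1	/* stand-in for CONFIG_GRKERNSEC_HIDESYM=y */

static void show_sock_line(int refcnt, const void *sk, int drops)
{
#if HIDESYM
	const void *shown = NULL;	/* what the patched seq_printf() passes */
	(void)sk;
#else
	const void *shown = sk;
#endif
	printf("%d %p %d\n", refcnt, shown, drops);
}

int main(void)
{
	int dummy;

	show_sock_line(2, &dummy, 0);	/* address replaced by a null pointer */
	return 0;
}
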
68030 diff -urNp linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c
68031 --- linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68032 +++ linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68033 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68034 add_wait_queue(&self->open_wait, &wait);
68035
68036 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68037 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68038 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68039
68040 /* As far as I can see, we protect open_count - Jean II */
68041 spin_lock_irqsave(&self->spinlock, flags);
68042 if (!tty_hung_up_p(filp)) {
68043 extra_count = 1;
68044 - self->open_count--;
68045 + local_dec(&self->open_count);
68046 }
68047 spin_unlock_irqrestore(&self->spinlock, flags);
68048 - self->blocked_open++;
68049 + local_inc(&self->blocked_open);
68050
68051 while (1) {
68052 if (tty->termios->c_cflag & CBAUD) {
68053 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68054 }
68055
68056 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68057 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68058 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68059
68060 schedule();
68061 }
68062 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68063 if (extra_count) {
68064 /* ++ is not atomic, so this should be protected - Jean II */
68065 spin_lock_irqsave(&self->spinlock, flags);
68066 - self->open_count++;
68067 + local_inc(&self->open_count);
68068 spin_unlock_irqrestore(&self->spinlock, flags);
68069 }
68070 - self->blocked_open--;
68071 + local_dec(&self->blocked_open);
68072
68073 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68074 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68075 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68076
68077 if (!retval)
68078 self->flags |= ASYNC_NORMAL_ACTIVE;
68079 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68080 }
68081 /* ++ is not atomic, so this should be protected - Jean II */
68082 spin_lock_irqsave(&self->spinlock, flags);
68083 - self->open_count++;
68084 + local_inc(&self->open_count);
68085
68086 tty->driver_data = self;
68087 self->tty = tty;
68088 spin_unlock_irqrestore(&self->spinlock, flags);
68089
68090 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68091 - self->line, self->open_count);
68092 + self->line, local_read(&self->open_count));
68093
68094 /* Not really used by us, but lets do it anyway */
68095 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68096 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68097 return;
68098 }
68099
68100 - if ((tty->count == 1) && (self->open_count != 1)) {
68101 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68102 /*
68103 * Uh, oh. tty->count is 1, which means that the tty
68104 * structure will be freed. state->count should always
68105 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68106 */
68107 IRDA_DEBUG(0, "%s(), bad serial port count; "
68108 "tty->count is 1, state->count is %d\n", __func__ ,
68109 - self->open_count);
68110 - self->open_count = 1;
68111 + local_read(&self->open_count));
68112 + local_set(&self->open_count, 1);
68113 }
68114
68115 - if (--self->open_count < 0) {
68116 + if (local_dec_return(&self->open_count) < 0) {
68117 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68118 - __func__, self->line, self->open_count);
68119 - self->open_count = 0;
68120 + __func__, self->line, local_read(&self->open_count));
68121 + local_set(&self->open_count, 0);
68122 }
68123 - if (self->open_count) {
68124 + if (local_read(&self->open_count)) {
68125 spin_unlock_irqrestore(&self->spinlock, flags);
68126
68127 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68128 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68129 tty->closing = 0;
68130 self->tty = NULL;
68131
68132 - if (self->blocked_open) {
68133 + if (local_read(&self->blocked_open)) {
68134 if (self->close_delay)
68135 schedule_timeout_interruptible(self->close_delay);
68136 wake_up_interruptible(&self->open_wait);
68137 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68138 spin_lock_irqsave(&self->spinlock, flags);
68139 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68140 self->tty = NULL;
68141 - self->open_count = 0;
68142 + local_set(&self->open_count, 0);
68143 spin_unlock_irqrestore(&self->spinlock, flags);
68144
68145 wake_up_interruptible(&self->open_wait);
68146 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68147 seq_putc(m, '\n');
68148
68149 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68150 - seq_printf(m, "Open count: %d\n", self->open_count);
68151 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68152 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68153 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68154
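
The ircomm_tty.c hunks above convert open_count and blocked_open from plain ints, protected ad hoc by a spinlock, to local_t with local_inc()/local_dec_return()/local_read(), so every increment and decrement site is atomic on its own. The userspace sketch below uses C11 atomics to show the equivalent counter discipline; it is illustrative only, not the kernel local_t API.

#include <stdatomic.h>
#include <stdio.h>

struct tty_port_demo {
	atomic_int open_count;	/* was: plain int, guarded ad hoc by a spinlock */
};

static void port_open(struct tty_port_demo *p)
{
	atomic_fetch_add(&p->open_count, 1);		/* like local_inc() */
}

static int port_close(struct tty_port_demo *p)
{
	int now = atomic_fetch_sub(&p->open_count, 1) - 1; /* local_dec_return() */

	if (now < 0) {					/* same recovery as the patch */
		atomic_store(&p->open_count, 0);	/* like local_set(, 0) */
		now = 0;
	}
	return now;	/* 0 means the last opener is gone */
}

int main(void)
{
	struct tty_port_demo p = { .open_count = 0 };

	port_open(&p);
	port_open(&p);
	printf("after first close: %d\n", port_close(&p));
	printf("after second close: %d\n", port_close(&p));
	return 0;
}
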
68155 diff -urNp linux-2.6.32.41/net/iucv/af_iucv.c linux-2.6.32.41/net/iucv/af_iucv.c
68156 --- linux-2.6.32.41/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68157 +++ linux-2.6.32.41/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68158 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68159
68160 write_lock_bh(&iucv_sk_list.lock);
68161
68162 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68163 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68164 while (__iucv_get_sock_by_name(name)) {
68165 sprintf(name, "%08x",
68166 - atomic_inc_return(&iucv_sk_list.autobind_name));
68167 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68168 }
68169
68170 write_unlock_bh(&iucv_sk_list.lock);
68171 diff -urNp linux-2.6.32.41/net/key/af_key.c linux-2.6.32.41/net/key/af_key.c
68172 --- linux-2.6.32.41/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68173 +++ linux-2.6.32.41/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68174 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68175 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68176 struct xfrm_kmaddress k;
68177
68178 + pax_track_stack();
68179 +
68180 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68181 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68182 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68183 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68184 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68185 else
68186 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68188 + NULL,
68189 +#else
68190 s,
68191 +#endif
68192 atomic_read(&s->sk_refcnt),
68193 sk_rmem_alloc_get(s),
68194 sk_wmem_alloc_get(s),
68195 diff -urNp linux-2.6.32.41/net/mac80211/cfg.c linux-2.6.32.41/net/mac80211/cfg.c
68196 --- linux-2.6.32.41/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68197 +++ linux-2.6.32.41/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68198 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68199 return err;
68200 }
68201
68202 -struct cfg80211_ops mac80211_config_ops = {
68203 +const struct cfg80211_ops mac80211_config_ops = {
68204 .add_virtual_intf = ieee80211_add_iface,
68205 .del_virtual_intf = ieee80211_del_iface,
68206 .change_virtual_intf = ieee80211_change_iface,
68207 diff -urNp linux-2.6.32.41/net/mac80211/cfg.h linux-2.6.32.41/net/mac80211/cfg.h
68208 --- linux-2.6.32.41/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68209 +++ linux-2.6.32.41/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68210 @@ -4,6 +4,6 @@
68211 #ifndef __CFG_H
68212 #define __CFG_H
68213
68214 -extern struct cfg80211_ops mac80211_config_ops;
68215 +extern const struct cfg80211_ops mac80211_config_ops;
68216
68217 #endif /* __CFG_H */
68218 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_key.c linux-2.6.32.41/net/mac80211/debugfs_key.c
68219 --- linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68220 +++ linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68221 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68222 size_t count, loff_t *ppos)
68223 {
68224 struct ieee80211_key *key = file->private_data;
68225 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68226 + int i, bufsize = 2 * key->conf.keylen + 2;
68227 char *buf = kmalloc(bufsize, GFP_KERNEL);
68228 char *p = buf;
68229 + ssize_t res;
68230 +
68231 + if (buf == NULL)
68232 + return -ENOMEM;
68233
68234 for (i = 0; i < key->conf.keylen; i++)
68235 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68236 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_sta.c linux-2.6.32.41/net/mac80211/debugfs_sta.c
68237 --- linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68238 +++ linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68239 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68240 int i;
68241 struct sta_info *sta = file->private_data;
68242
68243 + pax_track_stack();
68244 +
68245 spin_lock_bh(&sta->lock);
68246 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68247 sta->ampdu_mlme.dialog_token_allocator + 1);
68248 diff -urNp linux-2.6.32.41/net/mac80211/ieee80211_i.h linux-2.6.32.41/net/mac80211/ieee80211_i.h
68249 --- linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68250 +++ linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68251 @@ -25,6 +25,7 @@
68252 #include <linux/etherdevice.h>
68253 #include <net/cfg80211.h>
68254 #include <net/mac80211.h>
68255 +#include <asm/local.h>
68256 #include "key.h"
68257 #include "sta_info.h"
68258
68259 @@ -635,7 +636,7 @@ struct ieee80211_local {
68260 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68261 spinlock_t queue_stop_reason_lock;
68262
68263 - int open_count;
68264 + local_t open_count;
68265 int monitors, cooked_mntrs;
68266 /* number of interfaces with corresponding FIF_ flags */
68267 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68268 diff -urNp linux-2.6.32.41/net/mac80211/iface.c linux-2.6.32.41/net/mac80211/iface.c
68269 --- linux-2.6.32.41/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68270 +++ linux-2.6.32.41/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68271 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68272 break;
68273 }
68274
68275 - if (local->open_count == 0) {
68276 + if (local_read(&local->open_count) == 0) {
68277 res = drv_start(local);
68278 if (res)
68279 goto err_del_bss;
68280 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68281 * Validate the MAC address for this device.
68282 */
68283 if (!is_valid_ether_addr(dev->dev_addr)) {
68284 - if (!local->open_count)
68285 + if (!local_read(&local->open_count))
68286 drv_stop(local);
68287 return -EADDRNOTAVAIL;
68288 }
68289 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68290
68291 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68292
68293 - local->open_count++;
68294 + local_inc(&local->open_count);
68295 if (hw_reconf_flags) {
68296 ieee80211_hw_config(local, hw_reconf_flags);
68297 /*
68298 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68299 err_del_interface:
68300 drv_remove_interface(local, &conf);
68301 err_stop:
68302 - if (!local->open_count)
68303 + if (!local_read(&local->open_count))
68304 drv_stop(local);
68305 err_del_bss:
68306 sdata->bss = NULL;
68307 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68308 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68309 }
68310
68311 - local->open_count--;
68312 + local_dec(&local->open_count);
68313
68314 switch (sdata->vif.type) {
68315 case NL80211_IFTYPE_AP_VLAN:
68316 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68317
68318 ieee80211_recalc_ps(local, -1);
68319
68320 - if (local->open_count == 0) {
68321 + if (local_read(&local->open_count) == 0) {
68322 ieee80211_clear_tx_pending(local);
68323 ieee80211_stop_device(local);
68324
68325 diff -urNp linux-2.6.32.41/net/mac80211/main.c linux-2.6.32.41/net/mac80211/main.c
68326 --- linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68327 +++ linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68328 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68329 local->hw.conf.power_level = power;
68330 }
68331
68332 - if (changed && local->open_count) {
68333 + if (changed && local_read(&local->open_count)) {
68334 ret = drv_config(local, changed);
68335 /*
68336 * Goal:
68337 diff -urNp linux-2.6.32.41/net/mac80211/mlme.c linux-2.6.32.41/net/mac80211/mlme.c
68338 --- linux-2.6.32.41/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68339 +++ linux-2.6.32.41/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68340 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68341 bool have_higher_than_11mbit = false, newsta = false;
68342 u16 ap_ht_cap_flags;
68343
68344 + pax_track_stack();
68345 +
68346 /*
68347 * AssocResp and ReassocResp have identical structure, so process both
68348 * of them in this function.
68349 diff -urNp linux-2.6.32.41/net/mac80211/pm.c linux-2.6.32.41/net/mac80211/pm.c
68350 --- linux-2.6.32.41/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68351 +++ linux-2.6.32.41/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68352 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68353 }
68354
68355 /* stop hardware - this must stop RX */
68356 - if (local->open_count)
68357 + if (local_read(&local->open_count))
68358 ieee80211_stop_device(local);
68359
68360 local->suspended = true;
68361 diff -urNp linux-2.6.32.41/net/mac80211/rate.c linux-2.6.32.41/net/mac80211/rate.c
68362 --- linux-2.6.32.41/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68363 +++ linux-2.6.32.41/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68364 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68365 struct rate_control_ref *ref, *old;
68366
68367 ASSERT_RTNL();
68368 - if (local->open_count)
68369 + if (local_read(&local->open_count))
68370 return -EBUSY;
68371
68372 ref = rate_control_alloc(name, local);
68373 diff -urNp linux-2.6.32.41/net/mac80211/tx.c linux-2.6.32.41/net/mac80211/tx.c
68374 --- linux-2.6.32.41/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68375 +++ linux-2.6.32.41/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68376 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68377 return cpu_to_le16(dur);
68378 }
68379
68380 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68381 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68382 struct net_device *dev)
68383 {
68384 return local == wdev_priv(dev->ieee80211_ptr);
68385 diff -urNp linux-2.6.32.41/net/mac80211/util.c linux-2.6.32.41/net/mac80211/util.c
68386 --- linux-2.6.32.41/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68387 +++ linux-2.6.32.41/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68388 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68389 local->resuming = true;
68390
68391 /* restart hardware */
68392 - if (local->open_count) {
68393 + if (local_read(&local->open_count)) {
68394 /*
68395 * Upon resume hardware can sometimes be goofy due to
68396 * various platform / driver / bus issues, so restarting
68397 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c
68398 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68399 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68400 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68401 .open = ip_vs_app_open,
68402 .read = seq_read,
68403 .llseek = seq_lseek,
68404 - .release = seq_release,
68405 + .release = seq_release_net,
68406 };
68407 #endif
68408
68409 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c
68410 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68411 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68412 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68413 /* if the connection is not template and is created
68414 * by sync, preserve the activity flag.
68415 */
68416 - cp->flags |= atomic_read(&dest->conn_flags) &
68417 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68418 (~IP_VS_CONN_F_INACTIVE);
68419 else
68420 - cp->flags |= atomic_read(&dest->conn_flags);
68421 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68422 cp->dest = dest;
68423
68424 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68425 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68426 atomic_set(&cp->refcnt, 1);
68427
68428 atomic_set(&cp->n_control, 0);
68429 - atomic_set(&cp->in_pkts, 0);
68430 + atomic_set_unchecked(&cp->in_pkts, 0);
68431
68432 atomic_inc(&ip_vs_conn_count);
68433 if (flags & IP_VS_CONN_F_NO_CPORT)
68434 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68435 .open = ip_vs_conn_open,
68436 .read = seq_read,
68437 .llseek = seq_lseek,
68438 - .release = seq_release,
68439 + .release = seq_release_net,
68440 };
68441
68442 static const char *ip_vs_origin_name(unsigned flags)
68443 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68444 .open = ip_vs_conn_sync_open,
68445 .read = seq_read,
68446 .llseek = seq_lseek,
68447 - .release = seq_release,
68448 + .release = seq_release_net,
68449 };
68450
68451 #endif
68452 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68453
68454 /* Don't drop the entry if its number of incoming packets is not
68455 located in [0, 8] */
68456 - i = atomic_read(&cp->in_pkts);
68457 + i = atomic_read_unchecked(&cp->in_pkts);
68458 if (i > 8 || i < 0) return 0;
68459
68460 if (!todrop_rate[i]) return 0;
68461 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c
68462 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68463 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68464 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68465 ret = cp->packet_xmit(skb, cp, pp);
68466 /* do not touch skb anymore */
68467
68468 - atomic_inc(&cp->in_pkts);
68469 + atomic_inc_unchecked(&cp->in_pkts);
68470 ip_vs_conn_put(cp);
68471 return ret;
68472 }
68473 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68474 * Sync connection if it is about to close to
68475 * encorage the standby servers to update the connections timeout
68476 */
68477 - pkts = atomic_add_return(1, &cp->in_pkts);
68478 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68479 if (af == AF_INET &&
68480 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68481 (((cp->protocol != IPPROTO_TCP ||
68482 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c
68483 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68484 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68485 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68486 ip_vs_rs_hash(dest);
68487 write_unlock_bh(&__ip_vs_rs_lock);
68488 }
68489 - atomic_set(&dest->conn_flags, conn_flags);
68490 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68491
68492 /* bind the service */
68493 if (!dest->svc) {
68494 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68495 " %-7s %-6d %-10d %-10d\n",
68496 &dest->addr.in6,
68497 ntohs(dest->port),
68498 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68499 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68500 atomic_read(&dest->weight),
68501 atomic_read(&dest->activeconns),
68502 atomic_read(&dest->inactconns));
68503 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68504 "%-7s %-6d %-10d %-10d\n",
68505 ntohl(dest->addr.ip),
68506 ntohs(dest->port),
68507 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68508 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68509 atomic_read(&dest->weight),
68510 atomic_read(&dest->activeconns),
68511 atomic_read(&dest->inactconns));
68512 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68513 .open = ip_vs_info_open,
68514 .read = seq_read,
68515 .llseek = seq_lseek,
68516 - .release = seq_release_private,
68517 + .release = seq_release_net,
68518 };
68519
68520 #endif
68521 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68522 .open = ip_vs_stats_seq_open,
68523 .read = seq_read,
68524 .llseek = seq_lseek,
68525 - .release = single_release,
68526 + .release = single_release_net,
68527 };
68528
68529 #endif
68530 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68531
68532 entry.addr = dest->addr.ip;
68533 entry.port = dest->port;
68534 - entry.conn_flags = atomic_read(&dest->conn_flags);
68535 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68536 entry.weight = atomic_read(&dest->weight);
68537 entry.u_threshold = dest->u_threshold;
68538 entry.l_threshold = dest->l_threshold;
68539 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68540 unsigned char arg[128];
68541 int ret = 0;
68542
68543 + pax_track_stack();
68544 +
68545 if (!capable(CAP_NET_ADMIN))
68546 return -EPERM;
68547
68548 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68549 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68550
68551 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68552 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68553 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68554 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68555 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68556 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68557 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c
68558 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68559 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68560 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68561
68562 if (opt)
68563 memcpy(&cp->in_seq, opt, sizeof(*opt));
68564 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68565 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68566 cp->state = state;
68567 cp->old_state = cp->state;
68568 /*
68569 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c
68570 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68571 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68572 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68573 else
68574 rc = NF_ACCEPT;
68575 /* do not touch skb anymore */
68576 - atomic_inc(&cp->in_pkts);
68577 + atomic_inc_unchecked(&cp->in_pkts);
68578 goto out;
68579 }
68580
68581 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68582 else
68583 rc = NF_ACCEPT;
68584 /* do not touch skb anymore */
68585 - atomic_inc(&cp->in_pkts);
68586 + atomic_inc_unchecked(&cp->in_pkts);
68587 goto out;
68588 }
68589
68590 diff -urNp linux-2.6.32.41/net/netfilter/Kconfig linux-2.6.32.41/net/netfilter/Kconfig
68591 --- linux-2.6.32.41/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68592 +++ linux-2.6.32.41/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68593 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68594
68595 To compile it as a module, choose M here. If unsure, say N.
68596
68597 +config NETFILTER_XT_MATCH_GRADM
68598 + tristate '"gradm" match support'
68599 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68600 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68601 + ---help---
68602 + The gradm match allows to match on grsecurity RBAC being enabled.
68603 + It is useful when iptables rules are applied early on bootup to
68604 + prevent connections to the machine (except from a trusted host)
68605 + while the RBAC system is disabled.
68606 +
68607 config NETFILTER_XT_MATCH_HASHLIMIT
68608 tristate '"hashlimit" match support'
68609 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
68610 diff -urNp linux-2.6.32.41/net/netfilter/Makefile linux-2.6.32.41/net/netfilter/Makefile
68611 --- linux-2.6.32.41/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68612 +++ linux-2.6.32.41/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68613 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68614 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68615 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68616 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68617 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68618 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68619 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68620 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68621 diff -urNp linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c
68622 --- linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68623 +++ linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68624 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68625 static int
68626 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68627 struct nf_conntrack_tuple *tuple,
68628 - enum ctattr_tuple type, u_int8_t l3num)
68629 + enum ctattr_type type, u_int8_t l3num)
68630 {
68631 struct nlattr *tb[CTA_TUPLE_MAX+1];
68632 int err;
68633 diff -urNp linux-2.6.32.41/net/netfilter/nfnetlink_log.c linux-2.6.32.41/net/netfilter/nfnetlink_log.c
68634 --- linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68635 +++ linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68636 @@ -68,7 +68,7 @@ struct nfulnl_instance {
68637 };
68638
68639 static DEFINE_RWLOCK(instances_lock);
68640 -static atomic_t global_seq;
68641 +static atomic_unchecked_t global_seq;
68642
68643 #define INSTANCE_BUCKETS 16
68644 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68645 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68646 /* global sequence number */
68647 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68648 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68649 - htonl(atomic_inc_return(&global_seq)));
68650 + htonl(atomic_inc_return_unchecked(&global_seq)));
68651
68652 if (data_len) {
68653 struct nlattr *nla;
68654 diff -urNp linux-2.6.32.41/net/netfilter/xt_gradm.c linux-2.6.32.41/net/netfilter/xt_gradm.c
68655 --- linux-2.6.32.41/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68656 +++ linux-2.6.32.41/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68657 @@ -0,0 +1,51 @@
68658 +/*
68659 + * gradm match for netfilter
68660 + * Copyright © Zbigniew Krzystolik, 2010
68661 + *
68662 + * This program is free software; you can redistribute it and/or modify
68663 + * it under the terms of the GNU General Public License; either version
68664 + * 2 or 3 as published by the Free Software Foundation.
68665 + */
68666 +#include <linux/module.h>
68667 +#include <linux/moduleparam.h>
68668 +#include <linux/skbuff.h>
68669 +#include <linux/netfilter/x_tables.h>
68670 +#include <linux/grsecurity.h>
68671 +#include <linux/netfilter/xt_gradm.h>
68672 +
68673 +static bool
68674 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
68675 +{
68676 + const struct xt_gradm_mtinfo *info = par->matchinfo;
68677 + bool retval = false;
68678 + if (gr_acl_is_enabled())
68679 + retval = true;
68680 + return retval ^ info->invflags;
68681 +}
68682 +
68683 +static struct xt_match gradm_mt_reg __read_mostly = {
68684 + .name = "gradm",
68685 + .revision = 0,
68686 + .family = NFPROTO_UNSPEC,
68687 + .match = gradm_mt,
68688 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
68689 + .me = THIS_MODULE,
68690 +};
68691 +
68692 +static int __init gradm_mt_init(void)
68693 +{
68694 + return xt_register_match(&gradm_mt_reg);
68695 +}
68696 +
68697 +static void __exit gradm_mt_exit(void)
68698 +{
68699 + xt_unregister_match(&gradm_mt_reg);
68700 +}
68701 +
68702 +module_init(gradm_mt_init);
68703 +module_exit(gradm_mt_exit);
68704 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
68705 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
68706 +MODULE_LICENSE("GPL");
68707 +MODULE_ALIAS("ipt_gradm");
68708 +MODULE_ALIAS("ip6t_gradm");
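
xt_gradm.c above is a self-contained netfilter match: it reports whether the grsecurity RBAC system is currently enabled and XORs that with the invert flags supplied from userspace, which is the usual way an iptables match implements "!" negation. The standalone sketch below reproduces just that decision; rbac_enabled stands in for gr_acl_is_enabled() and only bit 0 of the flags is considered.

#include <stdbool.h>
#include <stdio.h>

static bool gradm_match(bool rbac_enabled, unsigned int invflags)
{
	bool retval = rbac_enabled;

	return retval ^ (bool)(invflags & 1);
}

int main(void)
{
	printf("RBAC on,  plain rule : %d\n", gradm_match(true, 0));	/* 1 */
	printf("RBAC off, plain rule : %d\n", gradm_match(false, 0));	/* 0 */
	printf("RBAC off, negated    : %d\n", gradm_match(false, 1));	/* 1 */
	return 0;
}
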
68709 diff -urNp linux-2.6.32.41/net/netlink/af_netlink.c linux-2.6.32.41/net/netlink/af_netlink.c
68710 --- linux-2.6.32.41/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
68711 +++ linux-2.6.32.41/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
68712 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
68713 sk->sk_error_report(sk);
68714 }
68715 }
68716 - atomic_inc(&sk->sk_drops);
68717 + atomic_inc_unchecked(&sk->sk_drops);
68718 }
68719
68720 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
68721 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
68722 struct netlink_sock *nlk = nlk_sk(s);
68723
68724 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
68725 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68726 + NULL,
68727 +#else
68728 s,
68729 +#endif
68730 s->sk_protocol,
68731 nlk->pid,
68732 nlk->groups ? (u32)nlk->groups[0] : 0,
68733 sk_rmem_alloc_get(s),
68734 sk_wmem_alloc_get(s),
68735 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68736 + NULL,
68737 +#else
68738 nlk->cb,
68739 +#endif
68740 atomic_read(&s->sk_refcnt),
68741 - atomic_read(&s->sk_drops)
68742 + atomic_read_unchecked(&s->sk_drops)
68743 );
68744
68745 }
68746 diff -urNp linux-2.6.32.41/net/netrom/af_netrom.c linux-2.6.32.41/net/netrom/af_netrom.c
68747 --- linux-2.6.32.41/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
68748 +++ linux-2.6.32.41/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
68749 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
68750 struct sock *sk = sock->sk;
68751 struct nr_sock *nr = nr_sk(sk);
68752
68753 + memset(sax, 0, sizeof(*sax));
68754 lock_sock(sk);
68755 if (peer != 0) {
68756 if (sk->sk_state != TCP_ESTABLISHED) {
68757 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
68758 *uaddr_len = sizeof(struct full_sockaddr_ax25);
68759 } else {
68760 sax->fsa_ax25.sax25_family = AF_NETROM;
68761 - sax->fsa_ax25.sax25_ndigis = 0;
68762 sax->fsa_ax25.sax25_call = nr->source_addr;
68763 *uaddr_len = sizeof(struct sockaddr_ax25);
68764 }
68765 diff -urNp linux-2.6.32.41/net/packet/af_packet.c linux-2.6.32.41/net/packet/af_packet.c
68766 --- linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
68767 +++ linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
68768 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
68769
68770 seq_printf(seq,
68771 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
68772 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68773 + NULL,
68774 +#else
68775 s,
68776 +#endif
68777 atomic_read(&s->sk_refcnt),
68778 s->sk_type,
68779 ntohs(po->num),
68780 diff -urNp linux-2.6.32.41/net/phonet/af_phonet.c linux-2.6.32.41/net/phonet/af_phonet.c
68781 --- linux-2.6.32.41/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
68782 +++ linux-2.6.32.41/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
68783 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
68784 {
68785 struct phonet_protocol *pp;
68786
68787 - if (protocol >= PHONET_NPROTO)
68788 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68789 return NULL;
68790
68791 spin_lock(&proto_tab_lock);
68792 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
68793 {
68794 int err = 0;
68795
68796 - if (protocol >= PHONET_NPROTO)
68797 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68798 return -EINVAL;
68799
68800 err = proto_register(pp->prot, 1);
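
Both af_phonet.c hunks above add a "protocol < 0" test next to the existing upper-bound check: protocol is a signed int, so testing only protocol >= PHONET_NPROTO would still let a negative index reach the protocol table. A userspace sketch of the guarded lookup, with a toy table and names:

#include <stdio.h>

#define NPROTO_DEMO 4	/* toy counterpart of PHONET_NPROTO */

static const char *proto_tab_demo[NPROTO_DEMO] = { "p0", "p1", "p2", "p3" };

static const char *lookup_proto(int protocol)
{
	/* Both bounds, as in the patch; the upper-bound test alone would let
	 * a negative signed index through to the array access. */
	if (protocol < 0 || protocol >= NPROTO_DEMO)
		return NULL;
	return proto_tab_demo[protocol];
}

int main(void)
{
	printf("proto 2 -> %s\n", lookup_proto(2));
	printf("proto -1 -> %s\n", lookup_proto(-1) ? "found" : "rejected");
	return 0;
}
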
68801 diff -urNp linux-2.6.32.41/net/phonet/datagram.c linux-2.6.32.41/net/phonet/datagram.c
68802 --- linux-2.6.32.41/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
68803 +++ linux-2.6.32.41/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
68804 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
68805 if (err < 0) {
68806 kfree_skb(skb);
68807 if (err == -ENOMEM)
68808 - atomic_inc(&sk->sk_drops);
68809 + atomic_inc_unchecked(&sk->sk_drops);
68810 }
68811 return err ? NET_RX_DROP : NET_RX_SUCCESS;
68812 }
68813 diff -urNp linux-2.6.32.41/net/phonet/pep.c linux-2.6.32.41/net/phonet/pep.c
68814 --- linux-2.6.32.41/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
68815 +++ linux-2.6.32.41/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
68816 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
68817
68818 case PNS_PEP_CTRL_REQ:
68819 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
68820 - atomic_inc(&sk->sk_drops);
68821 + atomic_inc_unchecked(&sk->sk_drops);
68822 break;
68823 }
68824 __skb_pull(skb, 4);
68825 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
68826 if (!err)
68827 return 0;
68828 if (err == -ENOMEM)
68829 - atomic_inc(&sk->sk_drops);
68830 + atomic_inc_unchecked(&sk->sk_drops);
68831 break;
68832 }
68833
68834 if (pn->rx_credits == 0) {
68835 - atomic_inc(&sk->sk_drops);
68836 + atomic_inc_unchecked(&sk->sk_drops);
68837 err = -ENOBUFS;
68838 break;
68839 }
68840 diff -urNp linux-2.6.32.41/net/phonet/socket.c linux-2.6.32.41/net/phonet/socket.c
68841 --- linux-2.6.32.41/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
68842 +++ linux-2.6.32.41/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
68843 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
68844 sk->sk_state,
68845 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
68846 sock_i_uid(sk), sock_i_ino(sk),
68847 - atomic_read(&sk->sk_refcnt), sk,
68848 - atomic_read(&sk->sk_drops), &len);
68849 + atomic_read(&sk->sk_refcnt),
68850 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68851 + NULL,
68852 +#else
68853 + sk,
68854 +#endif
68855 + atomic_read_unchecked(&sk->sk_drops), &len);
68856 }
68857 seq_printf(seq, "%*s\n", 127 - len, "");
68858 return 0;
68859 diff -urNp linux-2.6.32.41/net/rds/cong.c linux-2.6.32.41/net/rds/cong.c
68860 --- linux-2.6.32.41/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
68861 +++ linux-2.6.32.41/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
68862 @@ -77,7 +77,7 @@
68863 * finds that the saved generation number is smaller than the global generation
68864 * number, it wakes up the process.
68865 */
68866 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
68867 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
68868
68869 /*
68870 * Congestion monitoring
68871 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
68872 rdsdebug("waking map %p for %pI4\n",
68873 map, &map->m_addr);
68874 rds_stats_inc(s_cong_update_received);
68875 - atomic_inc(&rds_cong_generation);
68876 + atomic_inc_unchecked(&rds_cong_generation);
68877 if (waitqueue_active(&map->m_waitq))
68878 wake_up(&map->m_waitq);
68879 if (waitqueue_active(&rds_poll_waitq))
68880 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
68881
68882 int rds_cong_updated_since(unsigned long *recent)
68883 {
68884 - unsigned long gen = atomic_read(&rds_cong_generation);
68885 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
68886
68887 if (likely(*recent == gen))
68888 return 0;
68889 diff -urNp linux-2.6.32.41/net/rds/iw_rdma.c linux-2.6.32.41/net/rds/iw_rdma.c
68890 --- linux-2.6.32.41/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
68891 +++ linux-2.6.32.41/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
68892 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
68893 struct rdma_cm_id *pcm_id;
68894 int rc;
68895
68896 + pax_track_stack();
68897 +
68898 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
68899 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
68900
68901 diff -urNp linux-2.6.32.41/net/rds/Kconfig linux-2.6.32.41/net/rds/Kconfig
68902 --- linux-2.6.32.41/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
68903 +++ linux-2.6.32.41/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
68904 @@ -1,7 +1,7 @@
68905
68906 config RDS
68907 tristate "The RDS Protocol (EXPERIMENTAL)"
68908 - depends on INET && EXPERIMENTAL
68909 + depends on INET && EXPERIMENTAL && BROKEN
68910 ---help---
68911 The RDS (Reliable Datagram Sockets) protocol provides reliable,
68912 sequenced delivery of datagrams over Infiniband, iWARP,
68913 diff -urNp linux-2.6.32.41/net/rxrpc/af_rxrpc.c linux-2.6.32.41/net/rxrpc/af_rxrpc.c
68914 --- linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
68915 +++ linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
68916 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
68917 __be32 rxrpc_epoch;
68918
68919 /* current debugging ID */
68920 -atomic_t rxrpc_debug_id;
68921 +atomic_unchecked_t rxrpc_debug_id;
68922
68923 /* count of skbs currently in use */
68924 atomic_t rxrpc_n_skbs;
68925 diff -urNp linux-2.6.32.41/net/rxrpc/ar-ack.c linux-2.6.32.41/net/rxrpc/ar-ack.c
68926 --- linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
68927 +++ linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
68928 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
68929
68930 _enter("{%d,%d,%d,%d},",
68931 call->acks_hard, call->acks_unacked,
68932 - atomic_read(&call->sequence),
68933 + atomic_read_unchecked(&call->sequence),
68934 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
68935
68936 stop = 0;
68937 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
68938
68939 /* each Tx packet has a new serial number */
68940 sp->hdr.serial =
68941 - htonl(atomic_inc_return(&call->conn->serial));
68942 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
68943
68944 hdr = (struct rxrpc_header *) txb->head;
68945 hdr->serial = sp->hdr.serial;
68946 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
68947 */
68948 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
68949 {
68950 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
68951 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
68952 }
68953
68954 /*
68955 @@ -627,7 +627,7 @@ process_further:
68956
68957 latest = ntohl(sp->hdr.serial);
68958 hard = ntohl(ack.firstPacket);
68959 - tx = atomic_read(&call->sequence);
68960 + tx = atomic_read_unchecked(&call->sequence);
68961
68962 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
68963 latest,
68964 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
68965 u32 abort_code = RX_PROTOCOL_ERROR;
68966 u8 *acks = NULL;
68967
68968 + pax_track_stack();
68969 +
68970 //printk("\n--------------------\n");
68971 _enter("{%d,%s,%lx} [%lu]",
68972 call->debug_id, rxrpc_call_states[call->state], call->events,
68973 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
68974 goto maybe_reschedule;
68975
68976 send_ACK_with_skew:
68977 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
68978 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
68979 ntohl(ack.serial));
68980 send_ACK:
68981 mtu = call->conn->trans->peer->if_mtu;
68982 @@ -1171,7 +1173,7 @@ send_ACK:
68983 ackinfo.rxMTU = htonl(5692);
68984 ackinfo.jumbo_max = htonl(4);
68985
68986 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
68987 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
68988 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
68989 ntohl(hdr.serial),
68990 ntohs(ack.maxSkew),
68991 @@ -1189,7 +1191,7 @@ send_ACK:
68992 send_message:
68993 _debug("send message");
68994
68995 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
68996 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
68997 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
68998 send_message_2:
68999
69000 diff -urNp linux-2.6.32.41/net/rxrpc/ar-call.c linux-2.6.32.41/net/rxrpc/ar-call.c
69001 --- linux-2.6.32.41/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69002 +++ linux-2.6.32.41/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69003 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69004 spin_lock_init(&call->lock);
69005 rwlock_init(&call->state_lock);
69006 atomic_set(&call->usage, 1);
69007 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69008 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69009 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69010
69011 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69012 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connection.c linux-2.6.32.41/net/rxrpc/ar-connection.c
69013 --- linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69014 +++ linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69015 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69016 rwlock_init(&conn->lock);
69017 spin_lock_init(&conn->state_lock);
69018 atomic_set(&conn->usage, 1);
69019 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69020 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69021 conn->avail_calls = RXRPC_MAXCALLS;
69022 conn->size_align = 4;
69023 conn->header_size = sizeof(struct rxrpc_header);
69024 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connevent.c linux-2.6.32.41/net/rxrpc/ar-connevent.c
69025 --- linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69026 +++ linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69027 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69028
69029 len = iov[0].iov_len + iov[1].iov_len;
69030
69031 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69032 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69033 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69034
69035 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69036 diff -urNp linux-2.6.32.41/net/rxrpc/ar-input.c linux-2.6.32.41/net/rxrpc/ar-input.c
69037 --- linux-2.6.32.41/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69038 +++ linux-2.6.32.41/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69039 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69040 /* track the latest serial number on this connection for ACK packet
69041 * information */
69042 serial = ntohl(sp->hdr.serial);
69043 - hi_serial = atomic_read(&call->conn->hi_serial);
69044 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69045 while (serial > hi_serial)
69046 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69047 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69048 serial);
69049
69050 /* request ACK generation for any ACK or DATA packet that requests
69051 diff -urNp linux-2.6.32.41/net/rxrpc/ar-internal.h linux-2.6.32.41/net/rxrpc/ar-internal.h
69052 --- linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69053 +++ linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69054 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69055 int error; /* error code for local abort */
69056 int debug_id; /* debug ID for printks */
69057 unsigned call_counter; /* call ID counter */
69058 - atomic_t serial; /* packet serial number counter */
69059 - atomic_t hi_serial; /* highest serial number received */
69060 + atomic_unchecked_t serial; /* packet serial number counter */
69061 + atomic_unchecked_t hi_serial; /* highest serial number received */
69062 u8 avail_calls; /* number of calls available */
69063 u8 size_align; /* data size alignment (for security) */
69064 u8 header_size; /* rxrpc + security header size */
69065 @@ -346,7 +346,7 @@ struct rxrpc_call {
69066 spinlock_t lock;
69067 rwlock_t state_lock; /* lock for state transition */
69068 atomic_t usage;
69069 - atomic_t sequence; /* Tx data packet sequence counter */
69070 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69071 u32 abort_code; /* local/remote abort code */
69072 enum { /* current state of call */
69073 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69074 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69075 */
69076 extern atomic_t rxrpc_n_skbs;
69077 extern __be32 rxrpc_epoch;
69078 -extern atomic_t rxrpc_debug_id;
69079 +extern atomic_unchecked_t rxrpc_debug_id;
69080 extern struct workqueue_struct *rxrpc_workqueue;
69081
69082 /*
69083 diff -urNp linux-2.6.32.41/net/rxrpc/ar-key.c linux-2.6.32.41/net/rxrpc/ar-key.c
69084 --- linux-2.6.32.41/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69085 +++ linux-2.6.32.41/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69086 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69087 return ret;
69088
69089 plen -= sizeof(*token);
69090 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69091 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69092 if (!token)
69093 return -ENOMEM;
69094
69095 - token->kad = kmalloc(plen, GFP_KERNEL);
69096 + token->kad = kzalloc(plen, GFP_KERNEL);
69097 if (!token->kad) {
69098 kfree(token);
69099 return -ENOMEM;
69100 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69101 goto error;
69102
69103 ret = -ENOMEM;
69104 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69105 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69106 if (!token)
69107 goto error;
69108 - token->kad = kmalloc(plen, GFP_KERNEL);
69109 + token->kad = kzalloc(plen, GFP_KERNEL);
69110 if (!token->kad)
69111 goto error_free;
69112
69113 diff -urNp linux-2.6.32.41/net/rxrpc/ar-local.c linux-2.6.32.41/net/rxrpc/ar-local.c
69114 --- linux-2.6.32.41/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69115 +++ linux-2.6.32.41/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69116 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69117 spin_lock_init(&local->lock);
69118 rwlock_init(&local->services_lock);
69119 atomic_set(&local->usage, 1);
69120 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69121 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69122 memcpy(&local->srx, srx, sizeof(*srx));
69123 }
69124
69125 diff -urNp linux-2.6.32.41/net/rxrpc/ar-output.c linux-2.6.32.41/net/rxrpc/ar-output.c
69126 --- linux-2.6.32.41/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69127 +++ linux-2.6.32.41/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69128 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69129 sp->hdr.cid = call->cid;
69130 sp->hdr.callNumber = call->call_id;
69131 sp->hdr.seq =
69132 - htonl(atomic_inc_return(&call->sequence));
69133 + htonl(atomic_inc_return_unchecked(&call->sequence));
69134 sp->hdr.serial =
69135 - htonl(atomic_inc_return(&conn->serial));
69136 + htonl(atomic_inc_return_unchecked(&conn->serial));
69137 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69138 sp->hdr.userStatus = 0;
69139 sp->hdr.securityIndex = conn->security_ix;
69140 diff -urNp linux-2.6.32.41/net/rxrpc/ar-peer.c linux-2.6.32.41/net/rxrpc/ar-peer.c
69141 --- linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69142 +++ linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69143 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69144 INIT_LIST_HEAD(&peer->error_targets);
69145 spin_lock_init(&peer->lock);
69146 atomic_set(&peer->usage, 1);
69147 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69148 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69149 memcpy(&peer->srx, srx, sizeof(*srx));
69150
69151 rxrpc_assess_MTU_size(peer);
69152 diff -urNp linux-2.6.32.41/net/rxrpc/ar-proc.c linux-2.6.32.41/net/rxrpc/ar-proc.c
69153 --- linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69154 +++ linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69155 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69156 atomic_read(&conn->usage),
69157 rxrpc_conn_states[conn->state],
69158 key_serial(conn->key),
69159 - atomic_read(&conn->serial),
69160 - atomic_read(&conn->hi_serial));
69161 + atomic_read_unchecked(&conn->serial),
69162 + atomic_read_unchecked(&conn->hi_serial));
69163
69164 return 0;
69165 }
69166 diff -urNp linux-2.6.32.41/net/rxrpc/ar-transport.c linux-2.6.32.41/net/rxrpc/ar-transport.c
69167 --- linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69168 +++ linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69169 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69170 spin_lock_init(&trans->client_lock);
69171 rwlock_init(&trans->conn_lock);
69172 atomic_set(&trans->usage, 1);
69173 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69174 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69175
69176 if (peer->srx.transport.family == AF_INET) {
69177 switch (peer->srx.transport_type) {
69178 diff -urNp linux-2.6.32.41/net/rxrpc/rxkad.c linux-2.6.32.41/net/rxrpc/rxkad.c
69179 --- linux-2.6.32.41/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69180 +++ linux-2.6.32.41/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69181 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69182 u16 check;
69183 int nsg;
69184
69185 + pax_track_stack();
69186 +
69187 sp = rxrpc_skb(skb);
69188
69189 _enter("");
69190 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69191 u16 check;
69192 int nsg;
69193
69194 + pax_track_stack();
69195 +
69196 _enter("");
69197
69198 sp = rxrpc_skb(skb);
69199 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69200
69201 len = iov[0].iov_len + iov[1].iov_len;
69202
69203 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69204 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69205 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69206
69207 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69208 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69209
69210 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69211
69212 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69213 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69214 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69215
69216 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
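The pax_track_stack() calls added at the top of these rxkad functions (and again in net/socket.c and net/xfrm/xfrm_user.c below) accompany PaX's kernel stack sanitization: functions with unusually large stack frames record how deep the stack grew so the used region can be cleared before returning to userland, limiting leaks of stale stack contents. A rough userspace illustration of that idea, with made-up names and a per-thread "deepest stack pointer" stand-in:

    /* Illustration only: remember the lowest stack address touched so that
     * region could later be wiped. Names here are invented. */
    #include <stdint.h>
    #include <stdio.h>

    static __thread uintptr_t lowest_stack = UINTPTR_MAX;

    static void track_stack_sketch(void)
    {
            char marker;                            /* sits near the current stack depth */
            uintptr_t sp = (uintptr_t)&marker;

            if (sp < lowest_stack)
                    lowest_stack = sp;              /* stack grows down: keep the minimum */
    }

    static void big_frame_function(void)
    {
            char scratch[4096];                     /* large local buffer */

            track_stack_sketch();                   /* analogous to pax_track_stack() */
            scratch[0] = 0;
            (void)scratch;
    }

    int main(void)
    {
            big_frame_function();
            printf("deepest stack point seen: %#lx\n", (unsigned long)lowest_stack);
            return 0;
    }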
69217 diff -urNp linux-2.6.32.41/net/sctp/proc.c linux-2.6.32.41/net/sctp/proc.c
69218 --- linux-2.6.32.41/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69219 +++ linux-2.6.32.41/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69220 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69221 sctp_for_each_hentry(epb, node, &head->chain) {
69222 ep = sctp_ep(epb);
69223 sk = epb->sk;
69224 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69225 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69226 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69227 + NULL, NULL,
69228 +#else
69229 + ep, sk,
69230 +#endif
69231 sctp_sk(sk)->type, sk->sk_state, hash,
69232 epb->bind_addr.port,
69233 sock_i_uid(sk), sock_i_ino(sk));
69234 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69235 seq_printf(seq,
69236 "%8p %8p %-3d %-3d %-2d %-4d "
69237 "%4d %8d %8d %7d %5lu %-5d %5d ",
69238 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69239 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69240 + NULL, NULL,
69241 +#else
69242 + assoc, sk,
69243 +#endif
69244 + sctp_sk(sk)->type, sk->sk_state,
69245 assoc->state, hash,
69246 assoc->assoc_id,
69247 assoc->sndbuf_used,
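The seq_printf() changes above substitute NULL for the endpoint, association and socket pointers when CONFIG_GRKERNSEC_HIDESYM is set, so /proc/net/sctp no longer discloses kernel heap addresses to readers; the same treatment is applied to /proc/net/unix later in this patch. Reduced to a standalone sketch (only the option name is real, the rest is illustrative):

    /* HIDESYM pattern: print the real pointer only when the option is off. */
    #include <stdio.h>

    /* #define CONFIG_GRKERNSEC_HIDESYM 1 */

    static void show_object(FILE *seq, const void *obj)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
            fprintf(seq, "%8p ", NULL);     /* hide the kernel address */
    #else
            fprintf(seq, "%8p ", obj);      /* legacy behaviour */
    #endif
    }

    int main(void)
    {
            int dummy;

            show_object(stdout, &dummy);
            putchar('\n');
            return 0;
    }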
69248 diff -urNp linux-2.6.32.41/net/sctp/socket.c linux-2.6.32.41/net/sctp/socket.c
69249 --- linux-2.6.32.41/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69250 +++ linux-2.6.32.41/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69251 @@ -5802,7 +5802,6 @@ pp_found:
69252 */
69253 int reuse = sk->sk_reuse;
69254 struct sock *sk2;
69255 - struct hlist_node *node;
69256
69257 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69258 if (pp->fastreuse && sk->sk_reuse &&
69259 diff -urNp linux-2.6.32.41/net/socket.c linux-2.6.32.41/net/socket.c
69260 --- linux-2.6.32.41/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69261 +++ linux-2.6.32.41/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69262 @@ -87,6 +87,7 @@
69263 #include <linux/wireless.h>
69264 #include <linux/nsproxy.h>
69265 #include <linux/magic.h>
69266 +#include <linux/in.h>
69267
69268 #include <asm/uaccess.h>
69269 #include <asm/unistd.h>
69270 @@ -97,6 +98,21 @@
69271 #include <net/sock.h>
69272 #include <linux/netfilter.h>
69273
69274 +extern void gr_attach_curr_ip(const struct sock *sk);
69275 +extern int gr_handle_sock_all(const int family, const int type,
69276 + const int protocol);
69277 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69278 +extern int gr_handle_sock_server_other(const struct sock *sck);
69279 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69280 +extern int gr_search_connect(struct socket * sock,
69281 + struct sockaddr_in * addr);
69282 +extern int gr_search_bind(struct socket * sock,
69283 + struct sockaddr_in * addr);
69284 +extern int gr_search_listen(struct socket * sock);
69285 +extern int gr_search_accept(struct socket * sock);
69286 +extern int gr_search_socket(const int domain, const int type,
69287 + const int protocol);
69288 +
69289 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69290 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69291 unsigned long nr_segs, loff_t pos);
69292 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69293 mnt);
69294 }
69295
69296 -static struct vfsmount *sock_mnt __read_mostly;
69297 +struct vfsmount *sock_mnt __read_mostly;
69298
69299 static struct file_system_type sock_fs_type = {
69300 .name = "sockfs",
69301 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69302 return -EAFNOSUPPORT;
69303 if (type < 0 || type >= SOCK_MAX)
69304 return -EINVAL;
69305 + if (protocol < 0)
69306 + return -EINVAL;
69307
69308 /* Compatibility.
69309
69310 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69311 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69312 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69313
69314 + if(!gr_search_socket(family, type, protocol)) {
69315 + retval = -EACCES;
69316 + goto out;
69317 + }
69318 +
69319 + if (gr_handle_sock_all(family, type, protocol)) {
69320 + retval = -EACCES;
69321 + goto out;
69322 + }
69323 +
69324 retval = sock_create(family, type, protocol, &sock);
69325 if (retval < 0)
69326 goto out;
69327 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69328 if (sock) {
69329 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69330 if (err >= 0) {
69331 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69332 + err = -EACCES;
69333 + goto error;
69334 + }
69335 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69336 + if (err)
69337 + goto error;
69338 +
69339 err = security_socket_bind(sock,
69340 (struct sockaddr *)&address,
69341 addrlen);
69342 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69343 (struct sockaddr *)
69344 &address, addrlen);
69345 }
69346 +error:
69347 fput_light(sock->file, fput_needed);
69348 }
69349 return err;
69350 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69351 if ((unsigned)backlog > somaxconn)
69352 backlog = somaxconn;
69353
69354 + if (gr_handle_sock_server_other(sock->sk)) {
69355 + err = -EPERM;
69356 + goto error;
69357 + }
69358 +
69359 + err = gr_search_listen(sock);
69360 + if (err)
69361 + goto error;
69362 +
69363 err = security_socket_listen(sock, backlog);
69364 if (!err)
69365 err = sock->ops->listen(sock, backlog);
69366
69367 +error:
69368 fput_light(sock->file, fput_needed);
69369 }
69370 return err;
69371 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69372 newsock->type = sock->type;
69373 newsock->ops = sock->ops;
69374
69375 + if (gr_handle_sock_server_other(sock->sk)) {
69376 + err = -EPERM;
69377 + sock_release(newsock);
69378 + goto out_put;
69379 + }
69380 +
69381 + err = gr_search_accept(sock);
69382 + if (err) {
69383 + sock_release(newsock);
69384 + goto out_put;
69385 + }
69386 +
69387 /*
69388 * We don't need try_module_get here, as the listening socket (sock)
69389 * has the protocol module (sock->ops->owner) held.
69390 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69391 fd_install(newfd, newfile);
69392 err = newfd;
69393
69394 + gr_attach_curr_ip(newsock->sk);
69395 +
69396 out_put:
69397 fput_light(sock->file, fput_needed);
69398 out:
69399 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69400 int, addrlen)
69401 {
69402 struct socket *sock;
69403 + struct sockaddr *sck;
69404 struct sockaddr_storage address;
69405 int err, fput_needed;
69406
69407 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69408 if (err < 0)
69409 goto out_put;
69410
69411 + sck = (struct sockaddr *)&address;
69412 +
69413 + if (gr_handle_sock_client(sck)) {
69414 + err = -EACCES;
69415 + goto out_put;
69416 + }
69417 +
69418 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69419 + if (err)
69420 + goto out_put;
69421 +
69422 err =
69423 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69424 if (err)
69425 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69426 int err, ctl_len, iov_size, total_len;
69427 int fput_needed;
69428
69429 + pax_track_stack();
69430 +
69431 err = -EFAULT;
69432 if (MSG_CMSG_COMPAT & flags) {
69433 if (get_compat_msghdr(&msg_sys, msg_compat))
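Taken together, the net/socket.c hunks route socket(), bind(), listen(), accept4() and connect() through grsecurity's gr_search_*/gr_handle_sock_* hooks before the regular security_socket_* LSM checks run, failing early with -EACCES or -EPERM when the RBAC policy or the socket restriction options deny the operation; accept4() additionally calls gr_attach_curr_ip() on the new socket, which grsecurity uses to associate the peer address with the task for its logging. A stripped-down sketch of that early-deny shape (the hook names are reused from the patch, the surrounding scaffolding is invented):

    /* Sketch of the gating pattern used in the syscall paths above. */
    #include <errno.h>
    #include <stdio.h>

    static int gr_search_socket(int family, int type, int protocol)
    {
            (void)family; (void)type; (void)protocol;
            return 1;                       /* non-zero = allowed by RBAC policy */
    }

    static int gr_handle_sock_all(int family, int type, int protocol)
    {
            (void)family; (void)type; (void)protocol;
            return 0;                       /* non-zero = denied by the sock_all option */
    }

    static int sys_socket_sketch(int family, int type, int protocol)
    {
            if (!gr_search_socket(family, type, protocol))
                    return -EACCES;         /* deny before any socket is created */
            if (gr_handle_sock_all(family, type, protocol))
                    return -EACCES;
            /* ... only now fall through to sock_create() and the LSM hook ... */
            return 0;
    }

    int main(void)
    {
            printf("socket() gate returned %d\n",
                   sys_socket_sketch(2 /* AF_INET */, 1 /* SOCK_STREAM */, 0));
            return 0;
    }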
69434 diff -urNp linux-2.6.32.41/net/sunrpc/sched.c linux-2.6.32.41/net/sunrpc/sched.c
69435 --- linux-2.6.32.41/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69436 +++ linux-2.6.32.41/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69437 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69438 #ifdef RPC_DEBUG
69439 static void rpc_task_set_debuginfo(struct rpc_task *task)
69440 {
69441 - static atomic_t rpc_pid;
69442 + static atomic_unchecked_t rpc_pid;
69443
69444 task->tk_magic = RPC_TASK_MAGIC_ID;
69445 - task->tk_pid = atomic_inc_return(&rpc_pid);
69446 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69447 }
69448 #else
69449 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69450 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c
69451 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69452 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69453 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69454 static unsigned int min_max_inline = 4096;
69455 static unsigned int max_max_inline = 65536;
69456
69457 -atomic_t rdma_stat_recv;
69458 -atomic_t rdma_stat_read;
69459 -atomic_t rdma_stat_write;
69460 -atomic_t rdma_stat_sq_starve;
69461 -atomic_t rdma_stat_rq_starve;
69462 -atomic_t rdma_stat_rq_poll;
69463 -atomic_t rdma_stat_rq_prod;
69464 -atomic_t rdma_stat_sq_poll;
69465 -atomic_t rdma_stat_sq_prod;
69466 +atomic_unchecked_t rdma_stat_recv;
69467 +atomic_unchecked_t rdma_stat_read;
69468 +atomic_unchecked_t rdma_stat_write;
69469 +atomic_unchecked_t rdma_stat_sq_starve;
69470 +atomic_unchecked_t rdma_stat_rq_starve;
69471 +atomic_unchecked_t rdma_stat_rq_poll;
69472 +atomic_unchecked_t rdma_stat_rq_prod;
69473 +atomic_unchecked_t rdma_stat_sq_poll;
69474 +atomic_unchecked_t rdma_stat_sq_prod;
69475
69476 /* Temporary NFS request map and context caches */
69477 struct kmem_cache *svc_rdma_map_cachep;
69478 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69479 len -= *ppos;
69480 if (len > *lenp)
69481 len = *lenp;
69482 - if (len && copy_to_user(buffer, str_buf, len))
69483 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69484 return -EFAULT;
69485 *lenp = len;
69486 *ppos += len;
69487 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69488 {
69489 .procname = "rdma_stat_read",
69490 .data = &rdma_stat_read,
69491 - .maxlen = sizeof(atomic_t),
69492 + .maxlen = sizeof(atomic_unchecked_t),
69493 .mode = 0644,
69494 .proc_handler = &read_reset_stat,
69495 },
69496 {
69497 .procname = "rdma_stat_recv",
69498 .data = &rdma_stat_recv,
69499 - .maxlen = sizeof(atomic_t),
69500 + .maxlen = sizeof(atomic_unchecked_t),
69501 .mode = 0644,
69502 .proc_handler = &read_reset_stat,
69503 },
69504 {
69505 .procname = "rdma_stat_write",
69506 .data = &rdma_stat_write,
69507 - .maxlen = sizeof(atomic_t),
69508 + .maxlen = sizeof(atomic_unchecked_t),
69509 .mode = 0644,
69510 .proc_handler = &read_reset_stat,
69511 },
69512 {
69513 .procname = "rdma_stat_sq_starve",
69514 .data = &rdma_stat_sq_starve,
69515 - .maxlen = sizeof(atomic_t),
69516 + .maxlen = sizeof(atomic_unchecked_t),
69517 .mode = 0644,
69518 .proc_handler = &read_reset_stat,
69519 },
69520 {
69521 .procname = "rdma_stat_rq_starve",
69522 .data = &rdma_stat_rq_starve,
69523 - .maxlen = sizeof(atomic_t),
69524 + .maxlen = sizeof(atomic_unchecked_t),
69525 .mode = 0644,
69526 .proc_handler = &read_reset_stat,
69527 },
69528 {
69529 .procname = "rdma_stat_rq_poll",
69530 .data = &rdma_stat_rq_poll,
69531 - .maxlen = sizeof(atomic_t),
69532 + .maxlen = sizeof(atomic_unchecked_t),
69533 .mode = 0644,
69534 .proc_handler = &read_reset_stat,
69535 },
69536 {
69537 .procname = "rdma_stat_rq_prod",
69538 .data = &rdma_stat_rq_prod,
69539 - .maxlen = sizeof(atomic_t),
69540 + .maxlen = sizeof(atomic_unchecked_t),
69541 .mode = 0644,
69542 .proc_handler = &read_reset_stat,
69543 },
69544 {
69545 .procname = "rdma_stat_sq_poll",
69546 .data = &rdma_stat_sq_poll,
69547 - .maxlen = sizeof(atomic_t),
69548 + .maxlen = sizeof(atomic_unchecked_t),
69549 .mode = 0644,
69550 .proc_handler = &read_reset_stat,
69551 },
69552 {
69553 .procname = "rdma_stat_sq_prod",
69554 .data = &rdma_stat_sq_prod,
69555 - .maxlen = sizeof(atomic_t),
69556 + .maxlen = sizeof(atomic_unchecked_t),
69557 .mode = 0644,
69558 .proc_handler = &read_reset_stat,
69559 },
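Besides the atomic_unchecked_t conversions, the svc_rdma sysctl handler gains a bounds check: read_reset_stat() now refuses to copy more than sizeof(str_buf) bytes back to userspace, so an oversized length derived from the caller's *lenp/*ppos can no longer make copy_to_user() read past the on-stack buffer. The shape of the fix as a self-contained sketch (userspace stand-ins replace the kernel helpers):

    /* Clamp/refuse before copying out of a fixed-size stack buffer. */
    #include <stdio.h>
    #include <string.h>

    static long read_stat_sketch(char *user_buf, size_t len)
    {
            char str_buf[32];

            snprintf(str_buf, sizeof(str_buf), "%d\n", 42);
            if (len > sizeof(str_buf))      /* the check added by the patch */
                    return -1;              /* would be -EFAULT in the kernel */
            memcpy(user_buf, str_buf, len); /* stands in for copy_to_user() */
            return (long)len;
    }

    int main(void)
    {
            char out[64];

            printf("short read: %ld\n", read_stat_sketch(out, 4));
            printf("oversized read rejected: %ld\n", read_stat_sketch(out, sizeof(out)));
            return 0;
    }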
69560 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
69561 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
69562 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
69563 @@ -495,7 +495,7 @@ next_sge:
69564 svc_rdma_put_context(ctxt, 0);
69565 goto out;
69566 }
69567 - atomic_inc(&rdma_stat_read);
69568 + atomic_inc_unchecked(&rdma_stat_read);
69569
69570 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
69571 chl_map->ch[ch_no].count -= read_wr.num_sge;
69572 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69573 dto_q);
69574 list_del_init(&ctxt->dto_q);
69575 } else {
69576 - atomic_inc(&rdma_stat_rq_starve);
69577 + atomic_inc_unchecked(&rdma_stat_rq_starve);
69578 clear_bit(XPT_DATA, &xprt->xpt_flags);
69579 ctxt = NULL;
69580 }
69581 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69582 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
69583 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
69584 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
69585 - atomic_inc(&rdma_stat_recv);
69586 + atomic_inc_unchecked(&rdma_stat_recv);
69587
69588 /* Build up the XDR from the receive buffers. */
69589 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
69590 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c
69591 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
69592 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
69593 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
69594 write_wr.wr.rdma.remote_addr = to;
69595
69596 /* Post It */
69597 - atomic_inc(&rdma_stat_write);
69598 + atomic_inc_unchecked(&rdma_stat_write);
69599 if (svc_rdma_send(xprt, &write_wr))
69600 goto err;
69601 return 0;
69602 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c
69603 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
69604 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
69605 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
69606 return;
69607
69608 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
69609 - atomic_inc(&rdma_stat_rq_poll);
69610 + atomic_inc_unchecked(&rdma_stat_rq_poll);
69611
69612 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
69613 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
69614 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
69615 }
69616
69617 if (ctxt)
69618 - atomic_inc(&rdma_stat_rq_prod);
69619 + atomic_inc_unchecked(&rdma_stat_rq_prod);
69620
69621 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
69622 /*
69623 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
69624 return;
69625
69626 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
69627 - atomic_inc(&rdma_stat_sq_poll);
69628 + atomic_inc_unchecked(&rdma_stat_sq_poll);
69629 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
69630 if (wc.status != IB_WC_SUCCESS)
69631 /* Close the transport */
69632 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
69633 }
69634
69635 if (ctxt)
69636 - atomic_inc(&rdma_stat_sq_prod);
69637 + atomic_inc_unchecked(&rdma_stat_sq_prod);
69638 }
69639
69640 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
69641 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
69642 spin_lock_bh(&xprt->sc_lock);
69643 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
69644 spin_unlock_bh(&xprt->sc_lock);
69645 - atomic_inc(&rdma_stat_sq_starve);
69646 + atomic_inc_unchecked(&rdma_stat_sq_starve);
69647
69648 /* See if we can opportunistically reap SQ WR to make room */
69649 sq_cq_reap(xprt);
69650 diff -urNp linux-2.6.32.41/net/sysctl_net.c linux-2.6.32.41/net/sysctl_net.c
69651 --- linux-2.6.32.41/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
69652 +++ linux-2.6.32.41/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
69653 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
69654 struct ctl_table *table)
69655 {
69656 /* Allow network administrator to have same access as root. */
69657 - if (capable(CAP_NET_ADMIN)) {
69658 + if (capable_nolog(CAP_NET_ADMIN)) {
69659 int mode = (table->mode >> 6) & 7;
69660 return (mode << 6) | (mode << 3) | mode;
69661 }
69662 diff -urNp linux-2.6.32.41/net/unix/af_unix.c linux-2.6.32.41/net/unix/af_unix.c
69663 --- linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
69664 +++ linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
69665 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
69666 err = -ECONNREFUSED;
69667 if (!S_ISSOCK(inode->i_mode))
69668 goto put_fail;
69669 +
69670 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
69671 + err = -EACCES;
69672 + goto put_fail;
69673 + }
69674 +
69675 u = unix_find_socket_byinode(net, inode);
69676 if (!u)
69677 goto put_fail;
69678 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
69679 if (u) {
69680 struct dentry *dentry;
69681 dentry = unix_sk(u)->dentry;
69682 +
69683 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
69684 + err = -EPERM;
69685 + sock_put(u);
69686 + goto fail;
69687 + }
69688 +
69689 if (dentry)
69690 touch_atime(unix_sk(u)->mnt, dentry);
69691 } else
69692 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
69693 err = security_path_mknod(&nd.path, dentry, mode, 0);
69694 if (err)
69695 goto out_mknod_drop_write;
69696 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
69697 + err = -EACCES;
69698 + goto out_mknod_drop_write;
69699 + }
69700 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
69701 out_mknod_drop_write:
69702 mnt_drop_write(nd.path.mnt);
69703 if (err)
69704 goto out_mknod_dput;
69705 +
69706 + gr_handle_create(dentry, nd.path.mnt);
69707 +
69708 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
69709 dput(nd.path.dentry);
69710 nd.path.dentry = dentry;
69711 @@ -872,6 +892,10 @@ out_mknod_drop_write:
69712 goto out_unlock;
69713 }
69714
69715 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69716 + sk->sk_peercred.pid = current->pid;
69717 +#endif
69718 +
69719 list = &unix_socket_table[addr->hash];
69720 } else {
69721 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
69722 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
69723 unix_state_lock(s);
69724
69725 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
69726 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69727 + NULL,
69728 +#else
69729 s,
69730 +#endif
69731 atomic_read(&s->sk_refcnt),
69732 0,
69733 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
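In net/unix/af_unix.c the patch checks both ends of filesystem-based AF_UNIX sockets: unix_find_other() consults the RBAC ACL for the socket inode and, under GRKERNSEC_CHROOT_UNIX, refuses to connect to a socket whose creator is outside the caller's chroot (the creator's pid is recorded in sk_peercred at bind time for that purpose), while unix_bind() subjects the socket node's mknod to the ACL and reports the create via gr_handle_create(). The chroot restriction reduces conceptually to something like the following sketch (helper names and fields are invented, only the option name is real):

    /* Rough sketch of the CHROOT_UNIX idea: a chrooted task may only
     * connect to unix sockets created inside the same chroot. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct task_sketch {
            int pid;
            const char *fs_root;            /* stand-in for the task's root path */
    };

    static bool chroot_unix_allowed(const struct task_sketch *connector,
                                    const struct task_sketch *creator)
    {
            if (strcmp(connector->fs_root, "/") == 0)
                    return true;            /* connector not chrooted: unrestricted */
            /* chrooted connector: require the creator to share the chroot,
             * otherwise the connect fails with -EPERM as in the hunk above */
            return strcmp(connector->fs_root, creator->fs_root) == 0;
    }

    int main(void)
    {
            struct task_sketch jailed  = { 101, "/srv/jail" };
            struct task_sketch outside = {  42, "/" };

            printf("jailed -> outside allowed: %d\n", chroot_unix_allowed(&jailed, &outside));
            printf("outside -> jailed allowed: %d\n", chroot_unix_allowed(&outside, &jailed));
            return 0;
    }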
69734 diff -urNp linux-2.6.32.41/net/wireless/wext.c linux-2.6.32.41/net/wireless/wext.c
69735 --- linux-2.6.32.41/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
69736 +++ linux-2.6.32.41/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
69737 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
69738 */
69739
69740 /* Support for very large requests */
69741 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
69742 - (user_length > descr->max_tokens)) {
69743 + if (user_length > descr->max_tokens) {
69744 /* Allow userspace to GET more than max so
69745 * we can support any size GET requests.
69746 * There is still a limit : -ENOMEM.
69747 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
69748 }
69749 }
69750
69751 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
69752 - /*
69753 - * If this is a GET, but not NOMAX, it means that the extra
69754 - * data is not bounded by userspace, but by max_tokens. Thus
69755 - * set the length to max_tokens. This matches the extra data
69756 - * allocation.
69757 - * The driver should fill it with the number of tokens it
69758 - * provided, and it may check iwp->length rather than having
69759 - * knowledge of max_tokens. If the driver doesn't change the
69760 - * iwp->length, this ioctl just copies back max_token tokens
69761 - * filled with zeroes. Hopefully the driver isn't claiming
69762 - * them to be valid data.
69763 - */
69764 - iwp->length = descr->max_tokens;
69765 - }
69766 -
69767 err = handler(dev, info, (union iwreq_data *) iwp, extra);
69768
69769 iwp->length += essid_compat;
69770 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_policy.c linux-2.6.32.41/net/xfrm/xfrm_policy.c
69771 --- linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
69772 +++ linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
69773 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
69774 hlist_add_head(&policy->bydst, chain);
69775 xfrm_pol_hold(policy);
69776 net->xfrm.policy_count[dir]++;
69777 - atomic_inc(&flow_cache_genid);
69778 + atomic_inc_unchecked(&flow_cache_genid);
69779 if (delpol)
69780 __xfrm_policy_unlink(delpol, dir);
69781 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
69782 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
69783 write_unlock_bh(&xfrm_policy_lock);
69784
69785 if (ret && delete) {
69786 - atomic_inc(&flow_cache_genid);
69787 + atomic_inc_unchecked(&flow_cache_genid);
69788 xfrm_policy_kill(ret);
69789 }
69790 return ret;
69791 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
69792 write_unlock_bh(&xfrm_policy_lock);
69793
69794 if (ret && delete) {
69795 - atomic_inc(&flow_cache_genid);
69796 + atomic_inc_unchecked(&flow_cache_genid);
69797 xfrm_policy_kill(ret);
69798 }
69799 return ret;
69800 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
69801 }
69802
69803 }
69804 - atomic_inc(&flow_cache_genid);
69805 + atomic_inc_unchecked(&flow_cache_genid);
69806 out:
69807 write_unlock_bh(&xfrm_policy_lock);
69808 return err;
69809 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
69810 write_unlock_bh(&xfrm_policy_lock);
69811 if (pol) {
69812 if (dir < XFRM_POLICY_MAX)
69813 - atomic_inc(&flow_cache_genid);
69814 + atomic_inc_unchecked(&flow_cache_genid);
69815 xfrm_policy_kill(pol);
69816 return 0;
69817 }
69818 @@ -1477,7 +1477,7 @@ free_dst:
69819 goto out;
69820 }
69821
69822 -static int inline
69823 +static inline int
69824 xfrm_dst_alloc_copy(void **target, void *src, int size)
69825 {
69826 if (!*target) {
69827 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
69828 return 0;
69829 }
69830
69831 -static int inline
69832 +static inline int
69833 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
69834 {
69835 #ifdef CONFIG_XFRM_SUB_POLICY
69836 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
69837 #endif
69838 }
69839
69840 -static int inline
69841 +static inline int
69842 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
69843 {
69844 #ifdef CONFIG_XFRM_SUB_POLICY
69845 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
69846 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
69847
69848 restart:
69849 - genid = atomic_read(&flow_cache_genid);
69850 + genid = atomic_read_unchecked(&flow_cache_genid);
69851 policy = NULL;
69852 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
69853 pols[pi] = NULL;
69854 @@ -1680,7 +1680,7 @@ restart:
69855 goto error;
69856 }
69857 if (nx == -EAGAIN ||
69858 - genid != atomic_read(&flow_cache_genid)) {
69859 + genid != atomic_read_unchecked(&flow_cache_genid)) {
69860 xfrm_pols_put(pols, npols);
69861 goto restart;
69862 }
69863 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_user.c linux-2.6.32.41/net/xfrm/xfrm_user.c
69864 --- linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
69865 +++ linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
69866 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
69867 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
69868 int i;
69869
69870 + pax_track_stack();
69871 +
69872 if (xp->xfrm_nr == 0)
69873 return 0;
69874
69875 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
69876 int err;
69877 int n = 0;
69878
69879 + pax_track_stack();
69880 +
69881 if (attrs[XFRMA_MIGRATE] == NULL)
69882 return -EINVAL;
69883
69884 diff -urNp linux-2.6.32.41/samples/kobject/kset-example.c linux-2.6.32.41/samples/kobject/kset-example.c
69885 --- linux-2.6.32.41/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
69886 +++ linux-2.6.32.41/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
69887 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
69888 }
69889
69890 /* Our custom sysfs_ops that we will associate with our ktype later on */
69891 -static struct sysfs_ops foo_sysfs_ops = {
69892 +static const struct sysfs_ops foo_sysfs_ops = {
69893 .show = foo_attr_show,
69894 .store = foo_attr_store,
69895 };
69896 diff -urNp linux-2.6.32.41/scripts/basic/fixdep.c linux-2.6.32.41/scripts/basic/fixdep.c
69897 --- linux-2.6.32.41/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
69898 +++ linux-2.6.32.41/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
69899 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
69900
69901 static void parse_config_file(char *map, size_t len)
69902 {
69903 - int *end = (int *) (map + len);
69904 + unsigned int *end = (unsigned int *) (map + len);
69905 /* start at +1, so that p can never be < map */
69906 - int *m = (int *) map + 1;
69907 + unsigned int *m = (unsigned int *) map + 1;
69908 char *p, *q;
69909
69910 for (; m < end; m++) {
69911 @@ -371,7 +371,7 @@ static void print_deps(void)
69912 static void traps(void)
69913 {
69914 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
69915 - int *p = (int *)test;
69916 + unsigned int *p = (unsigned int *)test;
69917
69918 if (*p != INT_CONF) {
69919 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
69920 diff -urNp linux-2.6.32.41/scripts/Makefile.build linux-2.6.32.41/scripts/Makefile.build
69921 --- linux-2.6.32.41/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
69922 +++ linux-2.6.32.41/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
69923 @@ -59,7 +59,7 @@ endif
69924 endif
69925
69926 # Do not include host rules unless needed
69927 -ifneq ($(hostprogs-y)$(hostprogs-m),)
69928 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
69929 include scripts/Makefile.host
69930 endif
69931
69932 diff -urNp linux-2.6.32.41/scripts/Makefile.clean linux-2.6.32.41/scripts/Makefile.clean
69933 --- linux-2.6.32.41/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
69934 +++ linux-2.6.32.41/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
69935 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
69936 __clean-files := $(extra-y) $(always) \
69937 $(targets) $(clean-files) \
69938 $(host-progs) \
69939 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
69940 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
69941 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
69942
69943 # as clean-files is given relative to the current directory, this adds
69944 # a $(obj) prefix, except for absolute paths
69945 diff -urNp linux-2.6.32.41/scripts/Makefile.host linux-2.6.32.41/scripts/Makefile.host
69946 --- linux-2.6.32.41/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
69947 +++ linux-2.6.32.41/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
69948 @@ -31,6 +31,7 @@
69949 # Note: Shared libraries consisting of C++ files are not supported
69950
69951 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
69952 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
69953
69954 # C code
69955 # Executables compiled from a single .c file
69956 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
69957 # Shared libaries (only .c supported)
69958 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
69959 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
69960 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
69961 # Remove .so files from "xxx-objs"
69962 host-cobjs := $(filter-out %.so,$(host-cobjs))
69963
69964 diff -urNp linux-2.6.32.41/scripts/mod/file2alias.c linux-2.6.32.41/scripts/mod/file2alias.c
69965 --- linux-2.6.32.41/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
69966 +++ linux-2.6.32.41/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
69967 @@ -72,7 +72,7 @@ static void device_id_check(const char *
69968 unsigned long size, unsigned long id_size,
69969 void *symval)
69970 {
69971 - int i;
69972 + unsigned int i;
69973
69974 if (size % id_size || size < id_size) {
69975 if (cross_build != 0)
69976 @@ -102,7 +102,7 @@ static void device_id_check(const char *
69977 /* USB is special because the bcdDevice can be matched against a numeric range */
69978 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
69979 static void do_usb_entry(struct usb_device_id *id,
69980 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
69981 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
69982 unsigned char range_lo, unsigned char range_hi,
69983 struct module *mod)
69984 {
69985 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
69986 for (i = 0; i < count; i++) {
69987 const char *id = (char *)devs[i].id;
69988 char acpi_id[sizeof(devs[0].id)];
69989 - int j;
69990 + unsigned int j;
69991
69992 buf_printf(&mod->dev_table_buf,
69993 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
69994 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
69995
69996 for (j = 0; j < PNP_MAX_DEVICES; j++) {
69997 const char *id = (char *)card->devs[j].id;
69998 - int i2, j2;
69999 + unsigned int i2, j2;
70000 int dup = 0;
70001
70002 if (!id[0])
70003 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70004 /* add an individual alias for every device entry */
70005 if (!dup) {
70006 char acpi_id[sizeof(card->devs[0].id)];
70007 - int k;
70008 + unsigned int k;
70009
70010 buf_printf(&mod->dev_table_buf,
70011 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70012 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70013 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70014 char *alias)
70015 {
70016 - int i, j;
70017 + unsigned int i, j;
70018
70019 sprintf(alias, "dmi*");
70020
70021 diff -urNp linux-2.6.32.41/scripts/mod/modpost.c linux-2.6.32.41/scripts/mod/modpost.c
70022 --- linux-2.6.32.41/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70023 +++ linux-2.6.32.41/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70024 @@ -835,6 +835,7 @@ enum mismatch {
70025 INIT_TO_EXIT,
70026 EXIT_TO_INIT,
70027 EXPORT_TO_INIT_EXIT,
70028 + DATA_TO_TEXT
70029 };
70030
70031 struct sectioncheck {
70032 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70033 .fromsec = { "__ksymtab*", NULL },
70034 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70035 .mismatch = EXPORT_TO_INIT_EXIT
70036 +},
70037 +/* Do not reference code from writable data */
70038 +{
70039 + .fromsec = { DATA_SECTIONS, NULL },
70040 + .tosec = { TEXT_SECTIONS, NULL },
70041 + .mismatch = DATA_TO_TEXT
70042 }
70043 };
70044
70045 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70046 continue;
70047 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70048 continue;
70049 - if (sym->st_value == addr)
70050 - return sym;
70051 /* Find a symbol nearby - addr are maybe negative */
70052 d = sym->st_value - addr;
70053 + if (d == 0)
70054 + return sym;
70055 if (d < 0)
70056 d = addr - sym->st_value;
70057 if (d < distance) {
70058 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70059 "Fix this by removing the %sannotation of %s "
70060 "or drop the export.\n",
70061 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70062 + case DATA_TO_TEXT:
70063 +/*
70064 + fprintf(stderr,
70065 + "The variable %s references\n"
70066 + "the %s %s%s%s\n",
70067 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70068 +*/
70069 + break;
70070 case NO_MISMATCH:
70071 /* To get warnings on missing members */
70072 break;
70073 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70074 va_end(ap);
70075 }
70076
70077 -void buf_write(struct buffer *buf, const char *s, int len)
70078 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70079 {
70080 if (buf->size - buf->pos < len) {
70081 buf->size += len + SZ;
70082 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70083 if (fstat(fileno(file), &st) < 0)
70084 goto close_write;
70085
70086 - if (st.st_size != b->pos)
70087 + if (st.st_size != (off_t)b->pos)
70088 goto close_write;
70089
70090 tmp = NOFAIL(malloc(b->pos));
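The modpost changes add a DATA_TO_TEXT section-mismatch class (its warning is left commented out in this version) so that writable data sections referencing kernel text can be spotted; together with the KERNEXEC/constify work elsewhere in the patch, function-pointer tables are expected to be const and thus placed in .rodata rather than .data. The kind of construct the check is aimed at, in miniature (illustrative, not taken from the kernel):

    /* A writable object in .data holding a pointer into .text (what
     * DATA_TO_TEXT would flag) versus the const equivalent in .rodata. */
    #include <stdio.h>

    static void handler_fn(void)
    {
            puts("handled");
    }

    /* writable data referencing text: the pointer can be overwritten at runtime */
    static void (*handler_rw)(void) = handler_fn;

    /* const placement: the pointer itself can no longer be redirected */
    static void (* const handler_ro)(void) = handler_fn;

    int main(void)
    {
            handler_rw();
            handler_ro();
            return 0;
    }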
70091 diff -urNp linux-2.6.32.41/scripts/mod/modpost.h linux-2.6.32.41/scripts/mod/modpost.h
70092 --- linux-2.6.32.41/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70093 +++ linux-2.6.32.41/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70094 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70095
70096 struct buffer {
70097 char *p;
70098 - int pos;
70099 - int size;
70100 + unsigned int pos;
70101 + unsigned int size;
70102 };
70103
70104 void __attribute__((format(printf, 2, 3)))
70105 buf_printf(struct buffer *buf, const char *fmt, ...);
70106
70107 void
70108 -buf_write(struct buffer *buf, const char *s, int len);
70109 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70110
70111 struct module {
70112 struct module *next;
70113 diff -urNp linux-2.6.32.41/scripts/mod/sumversion.c linux-2.6.32.41/scripts/mod/sumversion.c
70114 --- linux-2.6.32.41/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70115 +++ linux-2.6.32.41/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70116 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70117 goto out;
70118 }
70119
70120 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70121 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70122 warn("writing sum in %s failed: %s\n",
70123 filename, strerror(errno));
70124 goto out;
70125 diff -urNp linux-2.6.32.41/scripts/pnmtologo.c linux-2.6.32.41/scripts/pnmtologo.c
70126 --- linux-2.6.32.41/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70127 +++ linux-2.6.32.41/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70128 @@ -237,14 +237,14 @@ static void write_header(void)
70129 fprintf(out, " * Linux logo %s\n", logoname);
70130 fputs(" */\n\n", out);
70131 fputs("#include <linux/linux_logo.h>\n\n", out);
70132 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70133 + fprintf(out, "static unsigned char %s_data[] = {\n",
70134 logoname);
70135 }
70136
70137 static void write_footer(void)
70138 {
70139 fputs("\n};\n\n", out);
70140 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70141 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70142 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70143 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70144 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70145 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70146 fputs("\n};\n\n", out);
70147
70148 /* write logo clut */
70149 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70150 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70151 logoname);
70152 write_hex_cnt = 0;
70153 for (i = 0; i < logo_clutsize; i++) {
70154 diff -urNp linux-2.6.32.41/security/capability.c linux-2.6.32.41/security/capability.c
70155 --- linux-2.6.32.41/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70156 +++ linux-2.6.32.41/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70157 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70158 }
70159 #endif /* CONFIG_AUDIT */
70160
70161 -struct security_operations default_security_ops = {
70162 +struct security_operations default_security_ops __read_only = {
70163 .name = "default",
70164 };
70165
70166 diff -urNp linux-2.6.32.41/security/commoncap.c linux-2.6.32.41/security/commoncap.c
70167 --- linux-2.6.32.41/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70168 +++ linux-2.6.32.41/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70169 @@ -27,7 +27,7 @@
70170 #include <linux/sched.h>
70171 #include <linux/prctl.h>
70172 #include <linux/securebits.h>
70173 -
70174 +#include <net/sock.h>
70175 /*
70176 * If a non-root user executes a setuid-root binary in
70177 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70178 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70179 }
70180 }
70181
70182 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70183 +
70184 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70185 {
70186 - NETLINK_CB(skb).eff_cap = current_cap();
70187 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70188 return 0;
70189 }
70190
70191 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70192 {
70193 const struct cred *cred = current_cred();
70194
70195 + if (gr_acl_enable_at_secure())
70196 + return 1;
70197 +
70198 if (cred->uid != 0) {
70199 if (bprm->cap_effective)
70200 return 1;
70201 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_api.c linux-2.6.32.41/security/integrity/ima/ima_api.c
70202 --- linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70203 +++ linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70204 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70205 int result;
70206
70207 /* can overflow, only indicator */
70208 - atomic_long_inc(&ima_htable.violations);
70209 + atomic_long_inc_unchecked(&ima_htable.violations);
70210
70211 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70212 if (!entry) {
70213 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_fs.c linux-2.6.32.41/security/integrity/ima/ima_fs.c
70214 --- linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70215 +++ linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70216 @@ -27,12 +27,12 @@
70217 static int valid_policy = 1;
70218 #define TMPBUFLEN 12
70219 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70220 - loff_t *ppos, atomic_long_t *val)
70221 + loff_t *ppos, atomic_long_unchecked_t *val)
70222 {
70223 char tmpbuf[TMPBUFLEN];
70224 ssize_t len;
70225
70226 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70227 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70228 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70229 }
70230
70231 diff -urNp linux-2.6.32.41/security/integrity/ima/ima.h linux-2.6.32.41/security/integrity/ima/ima.h
70232 --- linux-2.6.32.41/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70233 +++ linux-2.6.32.41/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70234 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70235 extern spinlock_t ima_queue_lock;
70236
70237 struct ima_h_table {
70238 - atomic_long_t len; /* number of stored measurements in the list */
70239 - atomic_long_t violations;
70240 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70241 + atomic_long_unchecked_t violations;
70242 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70243 };
70244 extern struct ima_h_table ima_htable;
70245 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_queue.c linux-2.6.32.41/security/integrity/ima/ima_queue.c
70246 --- linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70247 +++ linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70248 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70249 INIT_LIST_HEAD(&qe->later);
70250 list_add_tail_rcu(&qe->later, &ima_measurements);
70251
70252 - atomic_long_inc(&ima_htable.len);
70253 + atomic_long_inc_unchecked(&ima_htable.len);
70254 key = ima_hash_key(entry->digest);
70255 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70256 return 0;
70257 diff -urNp linux-2.6.32.41/security/Kconfig linux-2.6.32.41/security/Kconfig
70258 --- linux-2.6.32.41/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70259 +++ linux-2.6.32.41/security/Kconfig 2011-06-04 20:45:36.000000000 -0400
70260 @@ -4,6 +4,555 @@
70261
70262 menu "Security options"
70263
70264 +source grsecurity/Kconfig
70265 +
70266 +menu "PaX"
70267 +
70268 + config ARCH_TRACK_EXEC_LIMIT
70269 + bool
70270 +
70271 + config PAX_PER_CPU_PGD
70272 + bool
70273 +
70274 + config TASK_SIZE_MAX_SHIFT
70275 + int
70276 + depends on X86_64
70277 + default 47 if !PAX_PER_CPU_PGD
70278 + default 42 if PAX_PER_CPU_PGD
70279 +
70280 + config PAX_ENABLE_PAE
70281 + bool
70282 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70283 +
70284 +config PAX
70285 + bool "Enable various PaX features"
70286 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70287 + help
70288 + This allows you to enable various PaX features. PaX adds
70289 + intrusion prevention mechanisms to the kernel that reduce
70290 + the risks posed by exploitable memory corruption bugs.
70291 +
70292 +menu "PaX Control"
70293 + depends on PAX
70294 +
70295 +config PAX_SOFTMODE
70296 + bool 'Support soft mode'
70297 + select PAX_PT_PAX_FLAGS
70298 + help
70299 + Enabling this option will allow you to run PaX in soft mode, that
70300 + is, PaX features will not be enforced by default, only on executables
70301 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70302 + is the only way to mark executables for soft mode use.
70303 +
70304 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70305 + line option on boot. Furthermore you can control various PaX features
70306 + at runtime via the entries in /proc/sys/kernel/pax.
70307 +
70308 +config PAX_EI_PAX
70309 + bool 'Use legacy ELF header marking'
70310 + help
70311 + Enabling this option will allow you to control PaX features on
70312 + a per executable basis via the 'chpax' utility available at
70313 + http://pax.grsecurity.net/. The control flags will be read from
70314 + an otherwise reserved part of the ELF header. This marking has
70315 + numerous drawbacks (no support for soft-mode, toolchain does not
70316 + know about the non-standard use of the ELF header) therefore it
70317 + has been deprecated in favour of PT_PAX_FLAGS support.
70318 +
70319 + Note that if you enable PT_PAX_FLAGS marking support as well,
70320 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
70321 +
70322 +config PAX_PT_PAX_FLAGS
70323 + bool 'Use ELF program header marking'
70324 + help
70325 + Enabling this option will allow you to control PaX features on
70326 + a per executable basis via the 'paxctl' utility available at
70327 + http://pax.grsecurity.net/. The control flags will be read from
70328 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70329 + has the benefits of supporting both soft mode and being fully
70330 + integrated into the toolchain (the binutils patch is available
70331 + from http://pax.grsecurity.net).
70332 +
70333 + If your toolchain does not support PT_PAX_FLAGS markings,
70334 + you can create one in most cases with 'paxctl -C'.
70335 +
70336 + Note that if you enable the legacy EI_PAX marking support as well,
70337 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70338 +
70339 +choice
70340 + prompt 'MAC system integration'
70341 + default PAX_HAVE_ACL_FLAGS
70342 + help
70343 + Mandatory Access Control systems have the option of controlling
70344 + PaX flags on a per executable basis, choose the method supported
70345 + by your particular system.
70346 +
70347 + - "none": if your MAC system does not interact with PaX,
70348 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70349 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70350 +
70351 + NOTE: this option is for developers/integrators only.
70352 +
70353 + config PAX_NO_ACL_FLAGS
70354 + bool 'none'
70355 +
70356 + config PAX_HAVE_ACL_FLAGS
70357 + bool 'direct'
70358 +
70359 + config PAX_HOOK_ACL_FLAGS
70360 + bool 'hook'
70361 +endchoice
70362 +
70363 +endmenu
70364 +
70365 +menu "Non-executable pages"
70366 + depends on PAX
70367 +
70368 +config PAX_NOEXEC
70369 + bool "Enforce non-executable pages"
70370 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70371 + help
70372 + By design some architectures do not allow for protecting memory
70373 + pages against execution or even if they do, Linux does not make
70374 + use of this feature. In practice this means that if a page is
70375 + readable (such as the stack or heap) it is also executable.
70376 +
70377 + There is a well known exploit technique that makes use of this
70378 + fact and a common programming mistake where an attacker can
70379 + introduce code of his choice somewhere in the attacked program's
70380 + memory (typically the stack or the heap) and then execute it.
70381 +
70382 + If the attacked program was running with different (typically
70383 + higher) privileges than that of the attacker, then he can elevate
70384 + his own privilege level (e.g. get a root shell, write to files for
70385 + which he does not have write access to, etc).
70386 +
70387 + Enabling this option will let you choose from various features
70388 + that prevent the injection and execution of 'foreign' code in
70389 + a program.
70390 +
70391 + This will also break programs that rely on the old behaviour and
70392 + expect that dynamically allocated memory via the malloc() family
70393 + of functions is executable (which it is not). Notable examples
70394 + are the XFree86 4.x server, the java runtime and wine.
70395 +
70396 +config PAX_PAGEEXEC
70397 + bool "Paging based non-executable pages"
70398 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70399 + select S390_SWITCH_AMODE if S390
70400 + select S390_EXEC_PROTECT if S390
70401 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70402 + help
70403 + This implementation is based on the paging feature of the CPU.
70404 + On i386 without hardware non-executable bit support there is a
70405 + variable but usually low performance impact, however on Intel's
70406 + P4 core based CPUs it is very high so you should not enable this
70407 + for kernels meant to be used on such CPUs.
70408 +
70409 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70410 + with hardware non-executable bit support there is no performance
70411 + impact, on ppc the impact is negligible.
70412 +
70413 + Note that several architectures require various emulations due to
70414 + badly designed userland ABIs, this will cause a performance impact
70415 + but will disappear as soon as userland is fixed. For example, ppc
70416 + userland MUST have been built with secure-plt by a recent toolchain.
70417 +
70418 +config PAX_SEGMEXEC
70419 + bool "Segmentation based non-executable pages"
70420 + depends on PAX_NOEXEC && X86_32
70421 + help
70422 + This implementation is based on the segmentation feature of the
70423 + CPU and has a very small performance impact, however applications
70424 + will be limited to a 1.5 GB address space instead of the normal
70425 + 3 GB.
70426 +
70427 +config PAX_EMUTRAMP
70428 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70429 + default y if PARISC
70430 + help
70431 + There are some programs and libraries that for one reason or
70432 + another attempt to execute special small code snippets from
70433 + non-executable memory pages. Most notable examples are the
70434 + signal handler return code generated by the kernel itself and
70435 + the GCC trampolines.
70436 +
70437 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70438 + such programs will no longer work under your kernel.
70439 +
70440 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70441 + utilities to enable trampoline emulation for the affected programs
70442 + yet still have the protection provided by the non-executable pages.
70443 +
70444 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70445 + your system will not even boot.
70446 +
70447 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70448 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70449 + for the affected files.
70450 +
70451 + NOTE: enabling this feature *may* open up a loophole in the
70452 + protection provided by non-executable pages that an attacker
70453 + could abuse. Therefore the best solution is to not have any
70454 + files on your system that would require this option. This can
70455 + be achieved by not using libc5 (which relies on the kernel
70456 + signal handler return code) and not using or rewriting programs
70457 + that make use of the nested function implementation of GCC.
70458 + Skilled users can just fix GCC itself so that it implements
70459 + nested function calls in a way that does not interfere with PaX.
70460 +
70461 +config PAX_EMUSIGRT
70462 + bool "Automatically emulate sigreturn trampolines"
70463 + depends on PAX_EMUTRAMP && PARISC
70464 + default y
70465 + help
70466 + Enabling this option will have the kernel automatically detect
70467 + and emulate signal return trampolines executing on the stack
70468 + that would otherwise lead to task termination.
70469 +
70470 + This solution is intended as a temporary one for users with
70471 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70472 + Modula-3 runtime, etc) or executables linked to such, basically
70473 + everything that does not specify its own SA_RESTORER function in
70474 + normal executable memory like glibc 2.1+ does.
70475 +
70476 + On parisc you MUST enable this option, otherwise your system will
70477 + not even boot.
70478 +
70479 + NOTE: this feature cannot be disabled on a per executable basis
70480 + and since it *does* open up a loophole in the protection provided
70481 + by non-executable pages, the best solution is to not have any
70482 + files on your system that would require this option.
70483 +
70484 +config PAX_MPROTECT
70485 + bool "Restrict mprotect()"
70486 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
70487 + help
70488 + Enabling this option will prevent programs from
70489 + - changing the executable status of memory pages that were
70490 + not originally created as executable,
70491 + - making read-only executable pages writable again,
70492 + - creating executable pages from anonymous memory,
70493 + - making read-only-after-relocations (RELRO) data pages writable again.
70494 +
70495 + You should say Y here to complete the protection provided by
70496 + the enforcement of non-executable pages.
70497 +
70498 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70499 + this feature on a per file basis.
70500 +
70501 +config PAX_MPROTECT_COMPAT
70502 + bool "Use legacy/compat protection demoting (read help)"
70503 + depends on PAX_MPROTECT
70504 + default n
70505 + help
70506 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
70507 + by sending the proper error code to the application. For some broken
70508 + userland, this can cause problems with Python or other applications. The
70509 + current implementation however allows for applications like clamav to
70510 + detect if JIT compilation/execution is allowed and to fall back gracefully
70511 + to an interpreter-based mode if it does not. While we encourage everyone
70512 + to use the current implementation as-is and push upstream to fix broken
70513 + userland (note that the RWX logging option can assist with this), in some
70514 + environments this may not be possible. Having to disable MPROTECT
70515 + completely on certain binaries reduces the security benefit of PaX,
70516 + so this option is provided for those environments to revert to the old
70517 + behavior.
70518 +
70519 +config PAX_ELFRELOCS
70520 + bool "Allow ELF text relocations (read help)"
70521 + depends on PAX_MPROTECT
70522 + default n
70523 + help
70524 + Non-executable pages and mprotect() restrictions are effective
70525 + in preventing the introduction of new executable code into an
70526 + attacked task's address space. There remain only two venues
70527 + for this kind of attack: if the attacker can execute already
70528 + existing code in the attacked task then he can either have it
70529 + create and mmap() a file containing his code or have it mmap()
70530 + an already existing ELF library that does not have position
70531 + independent code in it and use mprotect() on it to make it
70532 + writable and copy his code there. While protecting against
70533 + the former approach is beyond PaX, the latter can be prevented
70534 + by having only PIC ELF libraries on one's system (which do not
70535 + need to relocate their code). If you are sure this is your case,
70536 + as is the case with all modern Linux distributions, then leave
70537 + this option disabled. You should say 'n' here.
70538 +
70539 +config PAX_ETEXECRELOCS
70540 + bool "Allow ELF ET_EXEC text relocations"
70541 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
70542 + select PAX_ELFRELOCS
70543 + default y
70544 + help
70545 + On some architectures there are incorrectly built applications
70546 + that require text relocations and will not work unless this
70547 + option is enabled. If you are an alpha, ia64 or parisc user,
70548 + enable this option initially and disable it once you have made
70549 + sure that none of your applications need it.
70550 +
70551 +config PAX_EMUPLT
70552 + bool "Automatically emulate ELF PLT"
70553 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
70554 + default y
70555 + help
70556 + Enabling this option will have the kernel automatically detect
70557 + and emulate the Procedure Linkage Table entries in ELF files.
70558 + On some architectures such entries are in writable memory and
70559 + thus become non-executable, leading to task termination. Therefore
70560 + it is mandatory that you enable this option on alpha, parisc,
70561 + sparc and sparc64, otherwise your system will not even boot.
70562 +
70563 + NOTE: this feature *does* open up a loophole in the protection
70564 + provided by the non-executable pages, therefore the proper
70565 + solution is to modify the toolchain to produce a PLT that does
70566 + not need to be writable.
70567 +
70568 +config PAX_DLRESOLVE
70569 + bool 'Emulate old glibc resolver stub'
70570 + depends on PAX_EMUPLT && SPARC
70571 + default n
70572 + help
70573 + This option is needed if userland has an old glibc (before 2.4)
70574 + that puts a 'save' instruction into the runtime generated resolver
70575 + stub that needs special emulation.
70576 +
70577 +config PAX_KERNEXEC
70578 + bool "Enforce non-executable kernel pages"
70579 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
70580 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
70581 + help
70582 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
70583 + that is, enabling this option will make it harder to inject
70584 + and execute 'foreign' code in kernel memory itself.
70585 +
70586 + Note that on x86_64 kernels there is a known regression when
70587 + this feature and KVM/VMX are both enabled in the host kernel.
70588 +
70589 +config PAX_KERNEXEC_MODULE_TEXT
70590 + int "Minimum amount of memory reserved for module code"
70591 + default "4"
70592 + depends on PAX_KERNEXEC && X86_32 && MODULES
70593 + help
70594 + Due to implementation details the kernel must reserve a fixed
70595 + amount of memory for module code at compile time that cannot be
70596 + changed at runtime. Here you can specify the minimum amount
70597 + in MB that will be reserved. Due to the same implementation
70598 + details this size will always be rounded up to the next 2/4 MB
70599 + boundary (2 MB or 4 MB, depending on PAE) so the memory actually
70600 + available for module code will usually be more than this minimum.
70601 +
70602 + The default 4 MB should be enough for most users but if you have
70603 + an excessive number of modules (e.g., most distribution configs
70604 + compile many drivers as modules) or use huge modules such as
70605 + nvidia's kernel driver, you will need to adjust this amount.
70606 + A good rule of thumb is to look at your currently loaded kernel
70607 + modules and add up their sizes.
70608 +
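The rule of thumb above can be applied mechanically; the sketch below is a hypothetical helper, not part of the patch, that sums the size column of /proc/modules and prints the total. The result is only a rough upper bound for the setting, since the size column covers module data as well as code.

    /* Hypothetical helper: add up the sizes of the currently loaded
     * modules, as the help text above suggests. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/modules", "r");
            char name[128];
            unsigned long size, total = 0;

            if (!f)
                    return 1;
            /* /proc/modules lines start with "name size ..."; skip the rest. */
            while (fscanf(f, "%127s %lu %*[^\n]", name, &size) == 2)
                    total += size;
            fclose(f);
            printf("loaded modules: ~%lu MB (%lu bytes)\n", total >> 20, total);
            return 0;
    }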
70609 +endmenu
70610 +
70611 +menu "Address Space Layout Randomization"
70612 + depends on PAX
70613 +
70614 +config PAX_ASLR
70615 + bool "Address Space Layout Randomization"
70616 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
70617 + help
70618 + Many if not most exploit techniques rely on the knowledge of
70619 + certain addresses in the attacked program. The following options
70620 + will allow the kernel to apply a certain amount of randomization
70621 + to specific parts of the program thereby forcing an attacker to
70622 + guess them in most cases. Any failed guess will most likely crash
70623 + the attacked program, which allows the kernel to detect such attempts
70624 + and react to them. PaX itself provides no reaction mechanism;
70625 + instead you are strongly encouraged to make use of Nergal's
70626 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
70627 + (http://www.grsecurity.net/) built-in crash detection features or
70628 + develop one yourself.
70629 +
70630 + By saying Y here you can choose to randomize the following areas:
70631 + - top of the task's kernel stack
70632 + - top of the task's userland stack
70633 + - base address for mmap() requests that do not specify one
70634 + (this includes all libraries)
70635 + - base address of the main executable
70636 +
70637 + It is strongly recommended to say Y here as address space layout
70638 + randomization has negligible impact on performance yet provides
70639 + very effective protection.
70640 +
70641 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70642 + this feature on a per file basis.
70643 +
70644 +config PAX_RANDKSTACK
70645 + bool "Randomize kernel stack base"
70646 + depends on PAX_ASLR && X86_TSC && X86
70647 + help
70648 + By saying Y here the kernel will randomize every task's kernel
70649 + stack on every system call. This will not only force an attacker
70650 + to guess it but also prevent him from making use of possible
70651 + leaked information about it.
70652 +
70653 + Since the kernel stack is a rather scarce resource, randomization
70654 + may cause unexpected stack overflows, therefore you should very
70655 + carefully test your system. Note that once enabled in the kernel
70656 + configuration, this feature cannot be disabled on a per file basis.
70657 +
70658 +config PAX_RANDUSTACK
70659 + bool "Randomize user stack base"
70660 + depends on PAX_ASLR
70661 + help
70662 + By saying Y here the kernel will randomize every task's userland
70663 + stack. The randomization is done in two steps, where the second
70664 + one may shift the top of the stack by a large amount and
70665 + cause problems for programs that want to use lots of memory (more
70666 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
70667 + For this reason the second step can be controlled by 'chpax' or
70668 + 'paxctl' on a per file basis.
70669 +
70670 +config PAX_RANDMMAP
70671 + bool "Randomize mmap() base"
70672 + depends on PAX_ASLR
70673 + help
70674 + By saying Y here the kernel will use a randomized base address for
70675 + mmap() requests that do not specify one themselves. As a result
70676 + all dynamically loaded libraries will appear at random addresses
70677 + and therefore be harder to exploit by a technique where an attacker
70678 + attempts to execute library code for his purposes (e.g. spawn a
70679 + shell from an exploited program that is running at an elevated
70680 + privilege level).
70681 +
70682 + Furthermore, if a program is relinked as a dynamic ELF file, its
70683 + base address will be randomized as well, completing the full
70684 + randomization of the address space layout. Attacking such programs
70685 + becomes a guess game. You can find an example of doing this at
70686 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
70687 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
70688 +
70689 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
70690 + feature on a per file basis.
70691 +
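A quick way to observe the effect described above, independent of this patch: the probe below maps anonymous memory without an address hint and prints where it landed. Run it a few times; the address should change between runs whenever mmap() base randomization is active.

    /* Standalone probe, not part of the patch: show the unhinted mmap()
     * base chosen by the kernel. */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            printf("mmap base: %p\n", p);
            munmap(p, 4096);
            return 0;
    }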
70692 +endmenu
70693 +
70694 +menu "Miscellaneous hardening features"
70695 +
70696 +config PAX_MEMORY_SANITIZE
70697 + bool "Sanitize all freed memory"
70698 + help
70699 + By saying Y here the kernel will erase memory pages as soon as they
70700 + are freed. This in turn reduces the lifetime of data stored in the
70701 + pages, making it less likely that sensitive information such as
70702 + passwords, cryptographic secrets, etc. stays in memory for too long.
70703 +
70704 + This is especially useful for programs whose runtime is short; long
70705 + lived processes and the kernel itself also benefit from it as long as
70706 + they operate on whole memory pages and ensure timely freeing of pages
70707 + that may hold sensitive information.
70708 +
70709 + The tradeoff is a performance impact: on a single CPU system, kernel
70710 + compilation sees a 3% slowdown; other systems and workloads may vary,
70711 + and you are advised to test this feature on your expected workload
70712 + before deploying it.
70713 +
70714 + Note that this feature does not protect data stored in live pages,
70715 + e.g., process memory swapped to disk may stay there for a long time.
70716 +
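The userland model below is only an analogy for the idea (the patch itself erases freed pages inside the kernel): wipe an allocation's contents at free time so the data does not linger in memory that later gets reused. In real userland code explicit_bzero() would be preferred, since a plain memset() before free() may be optimized away.

    /* Userland analogy only, not the kernel's implementation. */
    #include <stdlib.h>
    #include <string.h>

    static void sanitizing_free(void *p, size_t len)
    {
            if (!p)
                    return;
            memset(p, 0, len);      /* hardened code: explicit_bzero(p, len) */
            free(p);
    }

    int main(void)
    {
            size_t len = 64;
            char *secret = malloc(len);

            if (!secret)
                    return 1;
            strcpy(secret, "session key material");
            sanitizing_free(secret, len);
            return 0;
    }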
70717 +config PAX_MEMORY_STACKLEAK
70718 + bool "Sanitize kernel stack"
70719 + depends on X86
70720 + help
70721 + By saying Y here the kernel will erase the kernel stack before it
70722 + returns from a system call. This in turn reduces the information
70723 + that a kernel stack leak bug can reveal.
70724 +
70725 + Note that such a bug can still leak information that was put on
70726 + the stack by the current system call (the one eventually triggering
70727 + the bug) but traces of earlier system calls on the kernel stack
70728 + cannot leak anymore.
70729 +
70730 + The tradeoff is a performance impact: on a single CPU system, kernel
70731 + compilation sees a 1% slowdown; other systems and workloads may vary,
70732 + and you are advised to test this feature on your expected workload
70733 + before deploying it.
70734 +
70735 + Note: full support for this feature requires gcc with plugin support
70736 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
70737 + is not supported). Using older gcc versions means that functions
70738 + with large enough stack frames may leave uninitialized memory behind
70739 + that a later stack-leaking syscall may then expose.
70740 +
70741 +config PAX_MEMORY_UDEREF
70742 + bool "Prevent invalid userland pointer dereference"
70743 + depends on X86 && !UML_X86 && !XEN
70744 + select PAX_PER_CPU_PGD if X86_64
70745 + help
70746 + By saying Y here the kernel will be prevented from dereferencing
70747 + userland pointers in contexts where the kernel expects only kernel
70748 + pointers. This is both a useful runtime debugging feature and a
70749 + security measure that prevents exploiting a class of kernel bugs.
70750 +
70751 + The tradeoff is that some virtualization solutions may experience
70752 + a huge slowdown and therefore you should not enable this feature
70753 + for kernels meant to run in such environments. Whether a given VM
70754 + solution is affected or not is best determined by simply trying it
70755 + out; the performance impact will be obvious right from boot, as this
70756 + mechanism engages very early on. A good rule of thumb is that
70757 + VMs running on CPUs without hardware virtualization support (i.e.,
70758 + the majority of IA-32 CPUs) will likely experience the slowdown.
70759 +
70760 +config PAX_REFCOUNT
70761 + bool "Prevent various kernel object reference counter overflows"
70762 + depends on GRKERNSEC && (X86 || SPARC64)
70763 + help
70764 + By saying Y here the kernel will detect and prevent overflowing
70765 + various (but not all) kinds of object reference counters. Such
70766 + overflows can normally occur due to bugs only and are often, if
70767 + not always, exploitable.
70768 +
70769 + The tradeoff is that data structures protected by an overflowed
70770 + refcount will never be freed and therefore will leak memory. Note
70771 + that this leak happens even without this protection, but in
70772 + that case the overflow can eventually trigger the freeing of the
70773 + data structure while it is still being used elsewhere, resulting
70774 + in the exploitable situation that this feature prevents.
70775 +
70776 + Since this has a negligible performance impact, you should enable
70777 + this feature.
70778 +
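The model below (plain userland C, not the kernel's atomic_t implementation) shows the shape of the problem and of the mitigation: once a counter is allowed to wrap past its maximum it can later reach zero while references still exist, so the hardened variant saturates instead of wrapping, trading a memory leak for the premature free.

    /* Illustrative model only; the kernel uses atomic operations with
     * overflow detection, not this simple int. */
    #include <limits.h>
    #include <stdio.h>

    static int refcount = INT_MAX;

    static void get_ref_saturating(void)
    {
            if (refcount == INT_MAX) {
                    /* Refuse to wrap: leaking the object is safer than
                     * letting the count reach zero while it is in use. */
                    fprintf(stderr, "refcount saturated, leaking object\n");
                    return;
            }
            refcount++;
    }

    int main(void)
    {
            get_ref_saturating();   /* stays at INT_MAX instead of wrapping */
            printf("refcount = %d\n", refcount);
            return 0;
    }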
70779 +config PAX_USERCOPY
70780 + bool "Harden heap object copies between kernel and userland"
70781 + depends on X86 || PPC || SPARC
70782 + depends on GRKERNSEC && (SLAB || SLUB)
70783 + help
70784 + By saying Y here the kernel will enforce the size of heap objects
70785 + when they are copied in either direction between the kernel and
70786 + userland, even if only a part of the heap object is copied.
70787 +
70788 + Specifically, this checking prevents information leaking from the
70789 + kernel heap during kernel to userland copies (if the kernel heap
70790 + object is otherwise fully initialized) and prevents kernel heap
70791 + overflows during userland to kernel copies.
70792 +
70793 + Note that the current implementation provides the strictest bounds
70794 + checks for the SLUB allocator.
70795 +
70796 + Enabling this option also enables per-slab cache protection against
70797 + data in a given cache being copied into or out of userland via the
70798 + usercopy accessors. Though the whitelist of allowed regions will be
70799 + reduced over time, it notably protects important data structures
70800 + such as task structs.
70801 +
70802 + If frame pointers are enabled on x86, this option will also
70803 + restrict copies into and out of the kernel stack to local variables
70804 + within a single frame.
70805 +
70806 + Since this has a negligible performance impact, you should enable
70807 + this feature.
70808 +
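The sketch below is a self-contained model of the bounds check described above; the structure and names are illustrative, not the patch's actual symbols. The point is that the requested copy length is validated against the size of the containing heap object before any data crosses the kernel/userland boundary.

    /* Illustrative model only, not kernel code. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct tracked_object {
            size_t size;            /* size of the allocation */
            unsigned char data[64]; /* payload */
    };

    static bool usercopy_ok(const struct tracked_object *obj,
                            size_t offset, size_t len)
    {
            /* A copy that spills past the object would leak adjacent heap
             * data (kernel to user) or overflow the object (user to kernel). */
            return offset <= obj->size && len <= obj->size - offset;
    }

    int main(void)
    {
            struct tracked_object obj = { .size = sizeof(obj.data) };

            printf("copy 32 bytes at offset 0:  %s\n",
                   usercopy_ok(&obj, 0, 32) ? "allowed" : "denied");
            printf("copy 128 bytes at offset 0: %s\n",
                   usercopy_ok(&obj, 0, 128) ? "allowed" : "denied");
            return 0;
    }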
70809 +endmenu
70810 +
70811 +endmenu
70812 +
70813 config KEYS
70814 bool "Enable access key retention support"
70815 help
70816 @@ -146,7 +695,7 @@ config INTEL_TXT
70817 config LSM_MMAP_MIN_ADDR
70818 int "Low address space for LSM to protect from user allocation"
70819 depends on SECURITY && SECURITY_SELINUX
70820 - default 65536
70821 + default 32768
70822 help
70823 This is the portion of low virtual memory which should be protected
70824 from userspace allocation. Keeping a user from writing to low pages
70825 diff -urNp linux-2.6.32.41/security/keys/keyring.c linux-2.6.32.41/security/keys/keyring.c
70826 --- linux-2.6.32.41/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
70827 +++ linux-2.6.32.41/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
70828 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
70829 ret = -EFAULT;
70830
70831 for (loop = 0; loop < klist->nkeys; loop++) {
70832 + key_serial_t serial;
70833 key = klist->keys[loop];
70834 + serial = key->serial;
70835
70836 tmp = sizeof(key_serial_t);
70837 if (tmp > buflen)
70838 tmp = buflen;
70839
70840 - if (copy_to_user(buffer,
70841 - &key->serial,
70842 - tmp) != 0)
70843 + if (copy_to_user(buffer, &serial, tmp))
70844 goto error;
70845
70846 buflen -= tmp;
70847 diff -urNp linux-2.6.32.41/security/min_addr.c linux-2.6.32.41/security/min_addr.c
70848 --- linux-2.6.32.41/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
70849 +++ linux-2.6.32.41/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
70850 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
70851 */
70852 static void update_mmap_min_addr(void)
70853 {
70854 +#ifndef SPARC
70855 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
70856 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
70857 mmap_min_addr = dac_mmap_min_addr;
70858 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
70859 #else
70860 mmap_min_addr = dac_mmap_min_addr;
70861 #endif
70862 +#endif
70863 }
70864
70865 /*
70866 diff -urNp linux-2.6.32.41/security/root_plug.c linux-2.6.32.41/security/root_plug.c
70867 --- linux-2.6.32.41/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
70868 +++ linux-2.6.32.41/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
70869 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
70870 return 0;
70871 }
70872
70873 -static struct security_operations rootplug_security_ops = {
70874 +static struct security_operations rootplug_security_ops __read_only = {
70875 .bprm_check_security = rootplug_bprm_check_security,
70876 };
70877
70878 diff -urNp linux-2.6.32.41/security/security.c linux-2.6.32.41/security/security.c
70879 --- linux-2.6.32.41/security/security.c 2011-03-27 14:31:47.000000000 -0400
70880 +++ linux-2.6.32.41/security/security.c 2011-04-17 15:56:46.000000000 -0400
70881 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
70882 extern struct security_operations default_security_ops;
70883 extern void security_fixup_ops(struct security_operations *ops);
70884
70885 -struct security_operations *security_ops; /* Initialized to NULL */
70886 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
70887
70888 static inline int verify(struct security_operations *ops)
70889 {
70890 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
70891 * If there is already a security module registered with the kernel,
70892 * an error will be returned. Otherwise %0 is returned on success.
70893 */
70894 -int register_security(struct security_operations *ops)
70895 +int __init register_security(struct security_operations *ops)
70896 {
70897 if (verify(ops)) {
70898 printk(KERN_DEBUG "%s could not verify "
70899 diff -urNp linux-2.6.32.41/security/selinux/hooks.c linux-2.6.32.41/security/selinux/hooks.c
70900 --- linux-2.6.32.41/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
70901 +++ linux-2.6.32.41/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
70902 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
70903 * Minimal support for a secondary security module,
70904 * just to allow the use of the capability module.
70905 */
70906 -static struct security_operations *secondary_ops;
70907 +static struct security_operations *secondary_ops __read_only;
70908
70909 /* Lists of inode and superblock security structures initialized
70910 before the policy was loaded. */
70911 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
70912
70913 #endif
70914
70915 -static struct security_operations selinux_ops = {
70916 +static struct security_operations selinux_ops __read_only = {
70917 .name = "selinux",
70918
70919 .ptrace_access_check = selinux_ptrace_access_check,
70920 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
70921 avc_disable();
70922
70923 /* Reset security_ops to the secondary module, dummy or capability. */
70924 + pax_open_kernel();
70925 security_ops = secondary_ops;
70926 + pax_close_kernel();
70927
70928 /* Unregister netfilter hooks. */
70929 selinux_nf_ip_exit();
70930 diff -urNp linux-2.6.32.41/security/selinux/include/xfrm.h linux-2.6.32.41/security/selinux/include/xfrm.h
70931 --- linux-2.6.32.41/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
70932 +++ linux-2.6.32.41/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
70933 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
70934
70935 static inline void selinux_xfrm_notify_policyload(void)
70936 {
70937 - atomic_inc(&flow_cache_genid);
70938 + atomic_inc_unchecked(&flow_cache_genid);
70939 }
70940 #else
70941 static inline int selinux_xfrm_enabled(void)
70942 diff -urNp linux-2.6.32.41/security/selinux/ss/services.c linux-2.6.32.41/security/selinux/ss/services.c
70943 --- linux-2.6.32.41/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
70944 +++ linux-2.6.32.41/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
70945 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
70946 int rc = 0;
70947 struct policy_file file = { data, len }, *fp = &file;
70948
70949 + pax_track_stack();
70950 +
70951 if (!ss_initialized) {
70952 avtab_cache_init();
70953 if (policydb_read(&policydb, fp)) {
70954 diff -urNp linux-2.6.32.41/security/smack/smack_lsm.c linux-2.6.32.41/security/smack/smack_lsm.c
70955 --- linux-2.6.32.41/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
70956 +++ linux-2.6.32.41/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
70957 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
70958 return 0;
70959 }
70960
70961 -struct security_operations smack_ops = {
70962 +struct security_operations smack_ops __read_only = {
70963 .name = "smack",
70964
70965 .ptrace_access_check = smack_ptrace_access_check,
70966 diff -urNp linux-2.6.32.41/security/tomoyo/tomoyo.c linux-2.6.32.41/security/tomoyo/tomoyo.c
70967 --- linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
70968 +++ linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
70969 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
70970 * tomoyo_security_ops is a "struct security_operations" which is used for
70971 * registering TOMOYO.
70972 */
70973 -static struct security_operations tomoyo_security_ops = {
70974 +static struct security_operations tomoyo_security_ops __read_only = {
70975 .name = "tomoyo",
70976 .cred_alloc_blank = tomoyo_cred_alloc_blank,
70977 .cred_prepare = tomoyo_cred_prepare,
70978 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.c linux-2.6.32.41/sound/aoa/codecs/onyx.c
70979 --- linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
70980 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
70981 @@ -53,7 +53,7 @@ struct onyx {
70982 spdif_locked:1,
70983 analog_locked:1,
70984 original_mute:2;
70985 - int open_count;
70986 + local_t open_count;
70987 struct codec_info *codec_info;
70988
70989 /* mutex serializes concurrent access to the device
70990 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
70991 struct onyx *onyx = cii->codec_data;
70992
70993 mutex_lock(&onyx->mutex);
70994 - onyx->open_count++;
70995 + local_inc(&onyx->open_count);
70996 mutex_unlock(&onyx->mutex);
70997
70998 return 0;
70999 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71000 struct onyx *onyx = cii->codec_data;
71001
71002 mutex_lock(&onyx->mutex);
71003 - onyx->open_count--;
71004 - if (!onyx->open_count)
71005 + if (local_dec_and_test(&onyx->open_count))
71006 onyx->spdif_locked = onyx->analog_locked = 0;
71007 mutex_unlock(&onyx->mutex);
71008
71009 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.h linux-2.6.32.41/sound/aoa/codecs/onyx.h
71010 --- linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71011 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71012 @@ -11,6 +11,7 @@
71013 #include <linux/i2c.h>
71014 #include <asm/pmac_low_i2c.h>
71015 #include <asm/prom.h>
71016 +#include <asm/local.h>
71017
71018 /* PCM3052 register definitions */
71019
71020 diff -urNp linux-2.6.32.41/sound/drivers/mts64.c linux-2.6.32.41/sound/drivers/mts64.c
71021 --- linux-2.6.32.41/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71022 +++ linux-2.6.32.41/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71023 @@ -27,6 +27,7 @@
71024 #include <sound/initval.h>
71025 #include <sound/rawmidi.h>
71026 #include <sound/control.h>
71027 +#include <asm/local.h>
71028
71029 #define CARD_NAME "Miditerminal 4140"
71030 #define DRIVER_NAME "MTS64"
71031 @@ -65,7 +66,7 @@ struct mts64 {
71032 struct pardevice *pardev;
71033 int pardev_claimed;
71034
71035 - int open_count;
71036 + local_t open_count;
71037 int current_midi_output_port;
71038 int current_midi_input_port;
71039 u8 mode[MTS64_NUM_INPUT_PORTS];
71040 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71041 {
71042 struct mts64 *mts = substream->rmidi->private_data;
71043
71044 - if (mts->open_count == 0) {
71045 + if (local_read(&mts->open_count) == 0) {
71046 /* We don't need a spinlock here, because this is just called
71047 if the device has not been opened before.
71048 So there aren't any IRQs from the device */
71049 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71050
71051 msleep(50);
71052 }
71053 - ++(mts->open_count);
71054 + local_inc(&mts->open_count);
71055
71056 return 0;
71057 }
71058 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71059 struct mts64 *mts = substream->rmidi->private_data;
71060 unsigned long flags;
71061
71062 - --(mts->open_count);
71063 - if (mts->open_count == 0) {
71064 + if (local_dec_return(&mts->open_count) == 0) {
71065 /* We need the spinlock_irqsave here because we can still
71066 have IRQs at this point */
71067 spin_lock_irqsave(&mts->lock, flags);
71068 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71069
71070 msleep(500);
71071
71072 - } else if (mts->open_count < 0)
71073 - mts->open_count = 0;
71074 + } else if (local_read(&mts->open_count) < 0)
71075 + local_set(&mts->open_count, 0);
71076
71077 return 0;
71078 }
71079 diff -urNp linux-2.6.32.41/sound/drivers/portman2x4.c linux-2.6.32.41/sound/drivers/portman2x4.c
71080 --- linux-2.6.32.41/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71081 +++ linux-2.6.32.41/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71082 @@ -46,6 +46,7 @@
71083 #include <sound/initval.h>
71084 #include <sound/rawmidi.h>
71085 #include <sound/control.h>
71086 +#include <asm/local.h>
71087
71088 #define CARD_NAME "Portman 2x4"
71089 #define DRIVER_NAME "portman"
71090 @@ -83,7 +84,7 @@ struct portman {
71091 struct pardevice *pardev;
71092 int pardev_claimed;
71093
71094 - int open_count;
71095 + local_t open_count;
71096 int mode[PORTMAN_NUM_INPUT_PORTS];
71097 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71098 };
71099 diff -urNp linux-2.6.32.41/sound/oss/sb_audio.c linux-2.6.32.41/sound/oss/sb_audio.c
71100 --- linux-2.6.32.41/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71101 +++ linux-2.6.32.41/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71102 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71103 buf16 = (signed short *)(localbuf + localoffs);
71104 while (c)
71105 {
71106 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71107 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71108 if (copy_from_user(lbuf8,
71109 userbuf+useroffs + p,
71110 locallen))
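The one-character change above is a signedness fix: if c were ever negative, the signed comparison would leave locallen negative, and copy_from_user() would then receive it as a huge size_t. The standalone snippet below (not kernel code) shows the difference the cast makes.

    /* Standalone illustration of the signed vs. unsigned clamp. */
    #include <stdio.h>

    #define LBUFCOPYSIZE 1024

    int main(void)
    {
            int c = -1;
            int unclamped = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
            int clamped   = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

            printf("signed compare:   locallen = %d (as size_t: %zu)\n",
                   unclamped, (size_t)unclamped);
            printf("unsigned compare: locallen = %d\n", clamped);
            return 0;
    }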
71111 diff -urNp linux-2.6.32.41/sound/oss/swarm_cs4297a.c linux-2.6.32.41/sound/oss/swarm_cs4297a.c
71112 --- linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71113 +++ linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71114 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71115 {
71116 struct cs4297a_state *s;
71117 u32 pwr, id;
71118 - mm_segment_t fs;
71119 int rval;
71120 #ifndef CONFIG_BCM_CS4297A_CSWARM
71121 u64 cfg;
71122 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71123 if (!rval) {
71124 char *sb1250_duart_present;
71125
71126 +#if 0
71127 + mm_segment_t fs;
71128 fs = get_fs();
71129 set_fs(KERNEL_DS);
71130 -#if 0
71131 val = SOUND_MASK_LINE;
71132 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71133 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71134 val = initvol[i].vol;
71135 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71136 }
71137 + set_fs(fs);
71138 // cs4297a_write_ac97(s, 0x18, 0x0808);
71139 #else
71140 // cs4297a_write_ac97(s, 0x5e, 0x180);
71141 cs4297a_write_ac97(s, 0x02, 0x0808);
71142 cs4297a_write_ac97(s, 0x18, 0x0808);
71143 #endif
71144 - set_fs(fs);
71145
71146 list_add(&s->list, &cs4297a_devs);
71147
71148 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_codec.c linux-2.6.32.41/sound/pci/ac97/ac97_codec.c
71149 --- linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71150 +++ linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71151 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71152 }
71153
71154 /* build_ops to do nothing */
71155 -static struct snd_ac97_build_ops null_build_ops;
71156 +static const struct snd_ac97_build_ops null_build_ops;
71157
71158 #ifdef CONFIG_SND_AC97_POWER_SAVE
71159 static void do_update_power(struct work_struct *work)
71160 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_patch.c linux-2.6.32.41/sound/pci/ac97/ac97_patch.c
71161 --- linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71162 +++ linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71163 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71164 return 0;
71165 }
71166
71167 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71168 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71169 .build_spdif = patch_yamaha_ymf743_build_spdif,
71170 .build_3d = patch_yamaha_ymf7x3_3d,
71171 };
71172 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71173 return 0;
71174 }
71175
71176 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71177 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71178 .build_3d = patch_yamaha_ymf7x3_3d,
71179 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71180 };
71181 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71182 return 0;
71183 }
71184
71185 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71186 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71187 .build_specific = patch_wolfson_wm9703_specific,
71188 };
71189
71190 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71191 return 0;
71192 }
71193
71194 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71195 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71196 .build_specific = patch_wolfson_wm9704_specific,
71197 };
71198
71199 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71200 return 0;
71201 }
71202
71203 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71204 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71205 .build_specific = patch_wolfson_wm9705_specific,
71206 };
71207
71208 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71209 return 0;
71210 }
71211
71212 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71213 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71214 .build_specific = patch_wolfson_wm9711_specific,
71215 };
71216
71217 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71218 }
71219 #endif
71220
71221 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71222 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71223 .build_specific = patch_wolfson_wm9713_specific,
71224 .build_3d = patch_wolfson_wm9713_3d,
71225 #ifdef CONFIG_PM
71226 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71227 return 0;
71228 }
71229
71230 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71231 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71232 .build_3d = patch_sigmatel_stac9700_3d,
71233 .build_specific = patch_sigmatel_stac97xx_specific
71234 };
71235 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71236 return patch_sigmatel_stac97xx_specific(ac97);
71237 }
71238
71239 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71240 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71241 .build_3d = patch_sigmatel_stac9708_3d,
71242 .build_specific = patch_sigmatel_stac9708_specific
71243 };
71244 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71245 return 0;
71246 }
71247
71248 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71249 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71250 .build_3d = patch_sigmatel_stac9700_3d,
71251 .build_specific = patch_sigmatel_stac9758_specific
71252 };
71253 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71254 return 0;
71255 }
71256
71257 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71258 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71259 .build_spdif = patch_cirrus_build_spdif
71260 };
71261
71262 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71263 return 0;
71264 }
71265
71266 -static struct snd_ac97_build_ops patch_conexant_ops = {
71267 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71268 .build_spdif = patch_conexant_build_spdif
71269 };
71270
71271 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71272 }
71273 }
71274
71275 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71276 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71277 #ifdef CONFIG_PM
71278 .resume = ad18xx_resume
71279 #endif
71280 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71281 return 0;
71282 }
71283
71284 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71285 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71286 .build_specific = &patch_ad1885_specific,
71287 #ifdef CONFIG_PM
71288 .resume = ad18xx_resume
71289 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71290 return 0;
71291 }
71292
71293 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71294 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71295 .build_specific = &patch_ad1886_specific,
71296 #ifdef CONFIG_PM
71297 .resume = ad18xx_resume
71298 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71299 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71300 }
71301
71302 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71303 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71304 .build_post_spdif = patch_ad198x_post_spdif,
71305 .build_specific = patch_ad1981a_specific,
71306 #ifdef CONFIG_PM
71307 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71308 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71309 }
71310
71311 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71312 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71313 .build_post_spdif = patch_ad198x_post_spdif,
71314 .build_specific = patch_ad1981b_specific,
71315 #ifdef CONFIG_PM
71316 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71317 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71318 }
71319
71320 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71321 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71322 .build_post_spdif = patch_ad198x_post_spdif,
71323 .build_specific = patch_ad1888_specific,
71324 #ifdef CONFIG_PM
71325 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71326 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71327 }
71328
71329 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71330 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71331 .build_post_spdif = patch_ad198x_post_spdif,
71332 .build_specific = patch_ad1980_specific,
71333 #ifdef CONFIG_PM
71334 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71335 ARRAY_SIZE(snd_ac97_ad1985_controls));
71336 }
71337
71338 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71339 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71340 .build_post_spdif = patch_ad198x_post_spdif,
71341 .build_specific = patch_ad1985_specific,
71342 #ifdef CONFIG_PM
71343 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71344 ARRAY_SIZE(snd_ac97_ad1985_controls));
71345 }
71346
71347 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71348 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71349 .build_post_spdif = patch_ad198x_post_spdif,
71350 .build_specific = patch_ad1986_specific,
71351 #ifdef CONFIG_PM
71352 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71353 return 0;
71354 }
71355
71356 -static struct snd_ac97_build_ops patch_alc650_ops = {
71357 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71358 .build_specific = patch_alc650_specific,
71359 .update_jacks = alc650_update_jacks
71360 };
71361 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71362 return 0;
71363 }
71364
71365 -static struct snd_ac97_build_ops patch_alc655_ops = {
71366 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71367 .build_specific = patch_alc655_specific,
71368 .update_jacks = alc655_update_jacks
71369 };
71370 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71371 return 0;
71372 }
71373
71374 -static struct snd_ac97_build_ops patch_alc850_ops = {
71375 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71376 .build_specific = patch_alc850_specific,
71377 .update_jacks = alc850_update_jacks
71378 };
71379 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71380 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71381 }
71382
71383 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71384 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71385 .build_specific = patch_cm9738_specific,
71386 .update_jacks = cm9738_update_jacks
71387 };
71388 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71389 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71390 }
71391
71392 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71393 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71394 .build_specific = patch_cm9739_specific,
71395 .build_post_spdif = patch_cm9739_post_spdif,
71396 .update_jacks = cm9739_update_jacks
71397 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71398 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71399 }
71400
71401 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71402 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71403 .build_specific = patch_cm9761_specific,
71404 .build_post_spdif = patch_cm9761_post_spdif,
71405 .update_jacks = cm9761_update_jacks
71406 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71407 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71408 }
71409
71410 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71411 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71412 .build_specific = patch_cm9780_specific,
71413 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71414 };
71415 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71416 return 0;
71417 }
71418
71419 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71420 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71421 .build_specific = patch_vt1616_specific
71422 };
71423
71424 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71425 return 0;
71426 }
71427
71428 -static struct snd_ac97_build_ops patch_it2646_ops = {
71429 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71430 .build_specific = patch_it2646_specific,
71431 .update_jacks = it2646_update_jacks
71432 };
71433 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71434 return 0;
71435 }
71436
71437 -static struct snd_ac97_build_ops patch_si3036_ops = {
71438 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71439 .build_specific = patch_si3036_specific,
71440 };
71441
71442 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71443 return 0;
71444 }
71445
71446 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71447 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71448 .build_specific = patch_ucb1400_specific,
71449 };
71450
71451 diff -urNp linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c
71452 --- linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71453 +++ linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71454 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71455 cp_ready);
71456
71457 /* TODO */
71458 - if (cp_state)
71459 - ;
71460 - if (cp_ready)
71461 - ;
71462 + if (cp_state) {
71463 + }
71464 + if (cp_ready) {
71465 + }
71466 }
71467
71468
71469 diff -urNp linux-2.6.32.41/sound/pci/intel8x0m.c linux-2.6.32.41/sound/pci/intel8x0m.c
71470 --- linux-2.6.32.41/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71471 +++ linux-2.6.32.41/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71472 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71473 { 0x5455, "ALi M5455" },
71474 { 0x746d, "AMD AMD8111" },
71475 #endif
71476 - { 0 },
71477 + { 0, },
71478 };
71479
71480 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
71481 diff -urNp linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c
71482 --- linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
71483 +++ linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
71484 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
71485 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
71486 break;
71487 }
71488 - if (atomic_read(&chip->interrupt_sleep_count)) {
71489 - atomic_set(&chip->interrupt_sleep_count, 0);
71490 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71491 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71492 wake_up(&chip->interrupt_sleep);
71493 }
71494 __end:
71495 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
71496 continue;
71497 init_waitqueue_entry(&wait, current);
71498 add_wait_queue(&chip->interrupt_sleep, &wait);
71499 - atomic_inc(&chip->interrupt_sleep_count);
71500 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
71501 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
71502 remove_wait_queue(&chip->interrupt_sleep, &wait);
71503 }
71504 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
71505 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
71506 spin_unlock(&chip->reg_lock);
71507
71508 - if (atomic_read(&chip->interrupt_sleep_count)) {
71509 - atomic_set(&chip->interrupt_sleep_count, 0);
71510 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71511 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71512 wake_up(&chip->interrupt_sleep);
71513 }
71514 }
71515 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
71516 spin_lock_init(&chip->reg_lock);
71517 spin_lock_init(&chip->voice_lock);
71518 init_waitqueue_head(&chip->interrupt_sleep);
71519 - atomic_set(&chip->interrupt_sleep_count, 0);
71520 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71521 chip->card = card;
71522 chip->pci = pci;
71523 chip->irq = -1;
71524 diff -urNp linux-2.6.32.41/tools/gcc/Makefile linux-2.6.32.41/tools/gcc/Makefile
71525 --- linux-2.6.32.41/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
71526 +++ linux-2.6.32.41/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
71527 @@ -0,0 +1,11 @@
71528 +#CC := gcc
71529 +#PLUGIN_SOURCE_FILES := pax_plugin.c
71530 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
71531 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
71532 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
71533 +
71534 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
71535 +
71536 +hostlibs-y := pax_plugin.so
71537 +always := $(hostlibs-y)
71538 +pax_plugin-objs := pax_plugin.o
71539 diff -urNp linux-2.6.32.41/tools/gcc/pax_plugin.c linux-2.6.32.41/tools/gcc/pax_plugin.c
71540 --- linux-2.6.32.41/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
71541 +++ linux-2.6.32.41/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
71542 @@ -0,0 +1,242 @@
71543 +/*
71544 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
71545 + * Licensed under the GPL v2
71546 + *
71547 + * Note: the choice of the license means that the compilation process is
71548 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
71549 + * but for the kernel it doesn't matter since it doesn't link against
71550 + * any of the gcc libraries
71551 + *
71552 + * gcc plugin to help implement various PaX features
71553 + *
71554 + * - track lowest stack pointer
71555 + *
71556 + * TODO:
71557 + * - initialize all local variables
71558 + *
71559 + * BUGS:
71560 + */
71561 +#include "gcc-plugin.h"
71562 +#include "plugin-version.h"
71563 +#include "config.h"
71564 +#include "system.h"
71565 +#include "coretypes.h"
71566 +#include "tm.h"
71567 +#include "toplev.h"
71568 +#include "basic-block.h"
71569 +#include "gimple.h"
71570 +//#include "expr.h" where are you...
71571 +#include "diagnostic.h"
71572 +#include "rtl.h"
71573 +#include "emit-rtl.h"
71574 +#include "function.h"
71575 +#include "tree.h"
71576 +#include "tree-pass.h"
71577 +#include "intl.h"
71578 +
71579 +int plugin_is_GPL_compatible;
71580 +
71581 +static int track_frame_size = -1;
71582 +static const char track_function[] = "pax_track_stack";
71583 +static bool init_locals;
71584 +
71585 +static struct plugin_info pax_plugin_info = {
71586 + .version = "201106030000",
71587 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
71588 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
71589 +};
71590 +
71591 +static bool gate_pax_track_stack(void);
71592 +static unsigned int execute_pax_tree_instrument(void);
71593 +static unsigned int execute_pax_final(void);
71594 +
71595 +static struct gimple_opt_pass pax_tree_instrument_pass = {
71596 + .pass = {
71597 + .type = GIMPLE_PASS,
71598 + .name = "pax_tree_instrument",
71599 + .gate = gate_pax_track_stack,
71600 + .execute = execute_pax_tree_instrument,
71601 + .sub = NULL,
71602 + .next = NULL,
71603 + .static_pass_number = 0,
71604 + .tv_id = TV_NONE,
71605 + .properties_required = PROP_gimple_leh | PROP_cfg,
71606 + .properties_provided = 0,
71607 + .properties_destroyed = 0,
71608 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
71609 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
71610 + }
71611 +};
71612 +
71613 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
71614 + .pass = {
71615 + .type = RTL_PASS,
71616 + .name = "pax_final",
71617 + .gate = gate_pax_track_stack,
71618 + .execute = execute_pax_final,
71619 + .sub = NULL,
71620 + .next = NULL,
71621 + .static_pass_number = 0,
71622 + .tv_id = TV_NONE,
71623 + .properties_required = 0,
71624 + .properties_provided = 0,
71625 + .properties_destroyed = 0,
71626 + .todo_flags_start = 0,
71627 + .todo_flags_finish = 0
71628 + }
71629 +};
71630 +
71631 +static bool gate_pax_track_stack(void)
71632 +{
71633 + return track_frame_size >= 0;
71634 +}
71635 +
71636 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
71637 +{
71638 + gimple call;
71639 + tree decl, type;
71640 +
71641 + // insert call to void pax_track_stack(void)
71642 + type = build_function_type_list(void_type_node, NULL_TREE);
71643 + decl = build_fn_decl(track_function, type);
71644 + DECL_ASSEMBLER_NAME(decl); // for LTO
71645 + call = gimple_build_call(decl, 0);
71646 + if (before)
71647 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
71648 + else
71649 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
71650 +}
71651 +
71652 +static unsigned int execute_pax_tree_instrument(void)
71653 +{
71654 + basic_block bb;
71655 + gimple_stmt_iterator gsi;
71656 +
71657 + // 1. loop through BBs and GIMPLE statements
71658 + FOR_EACH_BB(bb) {
71659 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
71660 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
71661 + tree decl;
71662 + gimple stmt = gsi_stmt(gsi);
71663 +
71664 + if (!is_gimple_call(stmt))
71665 + continue;
71666 + decl = gimple_call_fndecl(stmt);
71667 + if (!decl)
71668 + continue;
71669 + if (TREE_CODE(decl) != FUNCTION_DECL)
71670 + continue;
71671 + if (!DECL_BUILT_IN(decl))
71672 + continue;
71673 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
71674 + continue;
71675 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
71676 + continue;
71677 +
71678 + // 2. insert track call after each __builtin_alloca call
71679 + pax_add_instrumentation(&gsi, false);
71680 +// print_node(stderr, "pax", decl, 4);
71681 + }
71682 + }
71683 +
71684 + // 3. insert track call at the beginning
71685 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
71686 + gsi = gsi_start_bb(bb);
71687 + pax_add_instrumentation(&gsi, true);
71688 +
71689 + return 0;
71690 +}
71691 +
71692 +static unsigned int execute_pax_final(void)
71693 +{
71694 + rtx insn;
71695 +
71696 + if (cfun->calls_alloca)
71697 + return 0;
71698 +
71699 + // 1. find pax_track_stack calls
71700 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
71701 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
71702 + rtx body;
71703 +
71704 + if (!CALL_P(insn))
71705 + continue;
71706 + body = PATTERN(insn);
71707 + if (GET_CODE(body) != CALL)
71708 + continue;
71709 + body = XEXP(body, 0);
71710 + if (GET_CODE(body) != MEM)
71711 + continue;
71712 + body = XEXP(body, 0);
71713 + if (GET_CODE(body) != SYMBOL_REF)
71714 + continue;
71715 + if (strcmp(XSTR(body, 0), track_function))
71716 + continue;
71717 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71718 + // 2. delete call if function frame is not big enough
71719 + if (get_frame_size() >= track_frame_size)
71720 + continue;
71721 + delete_insn_and_edges(insn);
71722 + }
71723 +
71724 +// print_simple_rtl(stderr, get_insns());
71725 +// print_rtl(stderr, get_insns());
71726 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71727 +
71728 + return 0;
71729 +}
71730 +
71731 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
71732 +{
71733 + const char * const plugin_name = plugin_info->base_name;
71734 + const int argc = plugin_info->argc;
71735 + const struct plugin_argument * const argv = plugin_info->argv;
71736 + int i;
71737 + struct register_pass_info pax_tree_instrument_pass_info = {
71738 + .pass = &pax_tree_instrument_pass.pass,
71739 +// .reference_pass_name = "tree_profile",
71740 + .reference_pass_name = "optimized",
71741 + .ref_pass_instance_number = 0,
71742 + .pos_op = PASS_POS_INSERT_AFTER
71743 + };
71744 + struct register_pass_info pax_final_pass_info = {
71745 + .pass = &pax_final_rtl_opt_pass.pass,
71746 + .reference_pass_name = "final",
71747 + .ref_pass_instance_number = 0,
71748 + .pos_op = PASS_POS_INSERT_BEFORE
71749 + };
71750 +
71751 + if (!plugin_default_version_check(version, &gcc_version)) {
71752 + error(G_("incompatible gcc/plugin versions"));
71753 + return 1;
71754 + }
71755 +
71756 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
71757 +
71758 + for (i = 0; i < argc; ++i) {
71759 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
71760 + if (!argv[i].value) {
71761 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71762 + continue;
71763 + }
71764 + track_frame_size = atoi(argv[i].value);
71765 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
71766 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71767 + continue;
71768 + }
71769 + if (!strcmp(argv[i].key, "initialize-locals")) {
71770 + if (argv[i].value) {
71771 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71772 + continue;
71773 + }
71774 + init_locals = true;
71775 + continue;
71776 + }
71777 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71778 + }
71779 +
71780 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
71781 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
71782 +
71783 + return 0;
71784 +}
71785 Binary files linux-2.6.32.41/tools/gcc/pax_plugin.so and linux-2.6.32.41/tools/gcc/pax_plugin.so differ
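For orientation, the sketch below shows conceptually what the two passes above do to a function; it is a hand-written illustration, not compiler output. The GIMPLE pass inserts a pax_track_stack() call at function entry and after every __builtin_alloca(), and the RTL pass later removes those calls again in functions that do not use alloca and whose frame is smaller than the track-lowest-sp threshold.

    /* Hand-written illustration of the instrumentation. pax_track_stack()
     * is provided by the kernel side of this patch; a stub is defined
     * here only so the sketch is self-contained. */
    void pax_track_stack(void) { }

    void example(unsigned long n)
    {
            char *p;

            pax_track_stack();              /* inserted at function entry */
            p = __builtin_alloca(n);
            pax_track_stack();              /* inserted after the alloca */
            p[0] = 0;
    }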
71786 diff -urNp linux-2.6.32.41/usr/gen_init_cpio.c linux-2.6.32.41/usr/gen_init_cpio.c
71787 --- linux-2.6.32.41/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
71788 +++ linux-2.6.32.41/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
71789 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
71790 int retval;
71791 int rc = -1;
71792 int namesize;
71793 - int i;
71794 + unsigned int i;
71795
71796 mode |= S_IFREG;
71797
71798 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
71799 *env_var = *expanded = '\0';
71800 strncat(env_var, start + 2, end - start - 2);
71801 strncat(expanded, new_location, start - new_location);
71802 - strncat(expanded, getenv(env_var), PATH_MAX);
71803 - strncat(expanded, end + 1, PATH_MAX);
71804 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
71805 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
71806 strncpy(new_location, expanded, PATH_MAX);
71807 + new_location[PATH_MAX] = 0;
71808 } else
71809 break;
71810 }
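The hunk above fixes a classic strncat() misuse: the third argument bounds how many characters are appended, not the total size of the destination, so the remaining room has to be recomputed from the current length (and the explicit terminator is needed because strncpy() does not NUL-terminate on truncation). A standalone illustration of the idiom:

    /* Standalone illustration, not the cpio tool itself. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[16] = "/usr";

            /* strncat(buf, src, n) appends up to n characters plus a NUL,
             * so n must be the space left in buf, not sizeof(buf). */
            strncat(buf, "/share", sizeof(buf) - strlen(buf) - 1);
            printf("%s\n", buf);
            return 0;
    }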
71811 diff -urNp linux-2.6.32.41/virt/kvm/kvm_main.c linux-2.6.32.41/virt/kvm/kvm_main.c
71812 --- linux-2.6.32.41/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
71813 +++ linux-2.6.32.41/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
71814 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
71815 return 0;
71816 }
71817
71818 +/* cannot be const */
71819 static struct file_operations kvm_vcpu_fops = {
71820 .release = kvm_vcpu_release,
71821 .unlocked_ioctl = kvm_vcpu_ioctl,
71822 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
71823 return 0;
71824 }
71825
71826 +/* cannot be const */
71827 static struct file_operations kvm_vm_fops = {
71828 .release = kvm_vm_release,
71829 .unlocked_ioctl = kvm_vm_ioctl,
71830 @@ -2431,6 +2433,7 @@ out:
71831 return r;
71832 }
71833
71834 +/* cannot be const */
71835 static struct file_operations kvm_chardev_ops = {
71836 .unlocked_ioctl = kvm_dev_ioctl,
71837 .compat_ioctl = kvm_dev_ioctl,
71838 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
71839 if (kvm_rebooting)
71840 /* spin while reset goes on */
71841 while (true)
71842 - ;
71843 + cpu_relax();
71844 /* Fault while not rebooting. We want the trace. */
71845 BUG();
71846 }
71847 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
71848 kvm_arch_vcpu_put(vcpu);
71849 }
71850
71851 -int kvm_init(void *opaque, unsigned int vcpu_size,
71852 +int kvm_init(const void *opaque, unsigned int vcpu_size,
71853 struct module *module)
71854 {
71855 int r;
71856 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
71857 /* A kmem cache lets us meet the alignment requirements of fx_save. */
71858 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
71859 __alignof__(struct kvm_vcpu),
71860 - 0, NULL);
71861 + SLAB_USERCOPY, NULL);
71862 if (!kvm_vcpu_cache) {
71863 r = -ENOMEM;
71864 goto out_free_5;